remove atomic signedness

This commit is contained in:
jacob 2025-07-11 12:14:09 -05:00
parent c1f405c6e4
commit ff8056b214
17 changed files with 169 additions and 218 deletions

View File

@ -54,9 +54,9 @@ void arena_release(struct arena *arena)
ASAN_UNPOISON(arena, arena->committed + ARENA_HEADER_SIZE); ASAN_UNPOISON(arena, arena->committed + ARENA_HEADER_SIZE);
__prof; __prof;
__proffree(arena); __proffree(arena);
gstat_sub(GSTAT_MEMORY_COMMITTED, arena->committed + ARENA_HEADER_SIZE); gstat_add(GSTAT_MEMORY_COMMITTED, -arena->committed - ARENA_HEADER_SIZE);
gstat_sub(GSTAT_MEMORY_RESERVED, arena->reserved); gstat_add(GSTAT_MEMORY_RESERVED, -arena->reserved);
gstat_sub(GSTAT_NUM_ARENAS, 1); gstat_add(GSTAT_NUM_ARENAS, -1);
sys_memory_release(arena); sys_memory_release(arena);
} }

View File

@ -3,35 +3,17 @@
#if PLATFORM_WINDOWS #if PLATFORM_WINDOWS
FORCE_INLINE i32 atomic_i32_fetch(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } FORCE_INLINE i32 atomic32_fetch(struct atomic32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
FORCE_INLINE i32 atomic_i32_fetch_set(struct atomic_i32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); } FORCE_INLINE i32 atomic32_fetch_set(struct atomic32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); }
FORCE_INLINE i32 atomic_i32_fetch_test_set(struct atomic_i32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); } FORCE_INLINE i32 atomic32_fetch_test_set(struct atomic32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
FORCE_INLINE i32 atomic_i32_fetch_xor(struct atomic_i32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v,c); } FORCE_INLINE i32 atomic32_fetch_xor(struct atomic32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v,c); }
FORCE_INLINE i32 atomic_i32_fetch_add(struct atomic_i32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); } FORCE_INLINE i32 atomic32_fetch_add(struct atomic32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); }
FORCE_INLINE i64 atomic_i64_fetch(struct atomic_i64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); } FORCE_INLINE i64 atomic64_fetch(struct atomic64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); }
FORCE_INLINE i64 atomic_i64_fetch_set(struct atomic_i64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); } FORCE_INLINE i64 atomic64_fetch_set(struct atomic64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); }
FORCE_INLINE i64 atomic_i64_fetch_test_set(struct atomic_i64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); } FORCE_INLINE i64 atomic64_fetch_test_set(struct atomic64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); }
FORCE_INLINE i64 atomic_i64_fetch_xor(struct atomic_i64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); } FORCE_INLINE i64 atomic64_fetch_xor(struct atomic64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); }
FORCE_INLINE i64 atomic_i64_fetch_add(struct atomic_i64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); } FORCE_INLINE i64 atomic64_fetch_add(struct atomic64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); }
FORCE_INLINE u32 atomic_u32_fetch(struct atomic_u32 *x) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
FORCE_INLINE u32 atomic_u32_fetch_set(struct atomic_u32 *x, u32 e) { return (u32)_InterlockedExchange((volatile long *)&x->_v, (long)e); }
FORCE_INLINE u32 atomic_u32_fetch_test_set(struct atomic_u32 *x, u32 c, u32 e) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, (long)e, (long)c); }
FORCE_INLINE u32 atomic_u32_fetch_xor(struct atomic_u32 *x, u32 c) { return (u32)_InterlockedXor((volatile long *)&x->_v, (long)c); }
FORCE_INLINE u32 atomic_u32_fetch_add_u32(struct atomic_u32 *x, u32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); }
FORCE_INLINE u32 atomic_u32_fetch_add_i32(struct atomic_u32 *x, i32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); }
FORCE_INLINE u64 atomic_u64_fetch(struct atomic_u64 *x) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); }
FORCE_INLINE u64 atomic_u64_fetch_set(struct atomic_u64 *x, u64 e) { return (u64)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); }
FORCE_INLINE u64 atomic_u64_fetch_test_set(struct atomic_u64 *x, u64 c, u64 e) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); }
FORCE_INLINE u32 atomic_u64_fetch_xor(struct atomic_u64 *x, u64 c) { return (u64)_InterlockedXor64((volatile i64 *)&x->_v, (i64)c); }
FORCE_INLINE u64 atomic_u64_fetch_add_u64(struct atomic_u64 *x, u64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
FORCE_INLINE u64 atomic_u64_fetch_add_i64(struct atomic_u64 *x, i64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
FORCE_INLINE void *atomic_ptr_fetch(struct atomic_ptr *x) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); }
FORCE_INLINE void *atomic_ptr_fetch_set(struct atomic_ptr *x, void *e) { return (void *)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); }
FORCE_INLINE void *atomic_ptr_fetch_test_set(struct atomic_ptr *x, void *c, void *e) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); }
#else #else
# error "Atomics not implemented" # error "Atomics not implemented"

View File

@ -391,59 +391,29 @@ GLOBAL const f64 *_f64_nan = (f64 *)&_f64_nan_u64;
* Atomics * Atomics
* ========================== */ * ========================== */
struct atomic_i32 { struct atomic32 {
volatile i32 _v; volatile i32 _v;
}; };
struct atomic_i64 { struct atomic64 {
volatile i64 _v; volatile i64 _v;
}; };
struct atomic_u32 {
volatile u32 _v;
};
struct atomic_u64 {
volatile u64 _v;
};
struct atomic_ptr {
volatile void *_v;
};
/* ========================== * /* ========================== *
* Cache-line isolated atomics * Cache-line isolated atomics
* ========================== */ * ========================== */
struct alignas(64) atomic_i32_padded { struct alignas(64) atomic32_padded {
struct atomic_i32 v; struct atomic32 v;
u8 _pad[60]; u8 _pad[60];
}; };
STATIC_ASSERT(sizeof(struct atomic_i32_padded) == 64 && alignof(struct atomic_i32_padded) == 64); STATIC_ASSERT(sizeof(struct atomic32_padded) == 64 && alignof(struct atomic32_padded) == 64);
struct alignas(64) atomic_i64_padded { struct alignas(64) atomic64_padded {
struct atomic_i64 v; struct atomic64 v;
u8 _pad[56]; u8 _pad[56];
}; };
STATIC_ASSERT(sizeof(struct atomic_i64_padded) == 64 && alignof(struct atomic_i64_padded) == 64); STATIC_ASSERT(sizeof(struct atomic64_padded) == 64 && alignof(struct atomic64_padded) == 64);
struct alignas(64) atomic_u32_padded {
struct atomic_u32 v;
u8 _pad[60];
};
STATIC_ASSERT(sizeof(struct atomic_u32_padded) == 64 && alignof(struct atomic_u32_padded) == 64);
struct alignas(64) atomic_u64_padded {
struct atomic_u64 v;
u8 _pad[56];
};
STATIC_ASSERT(sizeof(struct atomic_u64_padded) == 64 && alignof(struct atomic_u64_padded) == 64);
struct alignas(64) atomic_ptr_padded {
struct atomic_ptr v;
u8 _pad[56];
};
STATIC_ASSERT(sizeof(struct atomic_ptr_padded) == 64 && alignof(struct atomic_ptr_padded) == 64);
/* ========================== * /* ========================== *
* Common structs * Common structs

View File

@ -268,7 +268,7 @@ struct fenced_release_data {
* ========================== */ * ========================== */
GLOBAL struct { GLOBAL struct {
struct atomic_i32 initialized; struct atomic32 initialized;
/* Descriptor heaps pool */ /* Descriptor heaps pool */
struct snc_mutex command_descriptor_heaps_mutex; struct snc_mutex command_descriptor_heaps_mutex;
@ -327,7 +327,7 @@ GLOBAL struct {
struct command_queue *command_queues[DX12_NUM_QUEUES]; struct command_queue *command_queues[DX12_NUM_QUEUES];
/* Evictor thread */ /* Evictor thread */
struct atomic_i32 evictor_thread_shutdown; struct atomic32 evictor_thread_shutdown;
HANDLE evictor_thread_wake_event; HANDLE evictor_thread_wake_event;
struct sys_thread *evictor_thread; struct sys_thread *evictor_thread;
} G = ZI, DEBUG_ALIAS(G, G_gp_dx12); } G = ZI, DEBUG_ALIAS(G, G_gp_dx12);
@ -353,7 +353,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(pipeline_resource_watch_callback, name
void gp_startup(void) void gp_startup(void)
{ {
__prof; __prof;
if (atomic_i32_fetch_test_set(&G.initialized, 0, 1) != 0) { if (atomic32_fetch_test_set(&G.initialized, 0, 1) != 0) {
sys_panic(LIT("GP layer already initialized")); sys_panic(LIT("GP layer already initialized"));
} }
@ -410,7 +410,7 @@ INTERNAL SYS_EXIT_FUNC(gp_shutdown)
(UNUSED)command_queue_release; (UNUSED)command_queue_release;
#endif #endif
atomic_i32_fetch_set(&G.evictor_thread_shutdown, 1); atomic32_fetch_set(&G.evictor_thread_shutdown, 1);
SetEvent(G.evictor_thread_wake_event); SetEvent(G.evictor_thread_wake_event);
sys_thread_wait_release(G.evictor_thread); sys_thread_wait_release(G.evictor_thread);
} }
@ -2879,7 +2879,7 @@ INTERNAL SYS_THREAD_DEF(evictor_thread_entry_point, arg)
u64 completed_targets[DX12_NUM_QUEUES] = ZI; u64 completed_targets[DX12_NUM_QUEUES] = ZI;
b32 shutdown = atomic_i32_fetch(&G.evictor_thread_shutdown); b32 shutdown = atomic32_fetch(&G.evictor_thread_shutdown);
while (!shutdown) { while (!shutdown) {
struct arena_temp temp = arena_temp_begin(scratch.arena); struct arena_temp temp = arena_temp_begin(scratch.arena);
{ {
@ -2913,7 +2913,7 @@ INTERNAL SYS_THREAD_DEF(evictor_thread_entry_point, arg)
{ {
__profn("Wait on fence"); __profn("Wait on fence");
WaitForMultipleObjects(2, events, 0, INFINITE); WaitForMultipleObjects(2, events, 0, INFINITE);
shutdown = atomic_i32_fetch(&G.evictor_thread_shutdown); shutdown = atomic32_fetch(&G.evictor_thread_shutdown);
} }
} }
} }
@ -2949,7 +2949,7 @@ INTERNAL SYS_THREAD_DEF(evictor_thread_entry_point, arg)
{ {
__profn("Sleep"); __profn("Sleep");
WaitForSingleObject(G.evictor_thread_wake_event, INFINITE); WaitForSingleObject(G.evictor_thread_wake_event, INFINITE);
shutdown = atomic_i32_fetch(&G.evictor_thread_shutdown); shutdown = atomic32_fetch(&G.evictor_thread_shutdown);
} }
} }

View File

@ -8,19 +8,18 @@
#include "atomic.h" #include "atomic.h"
struct _gstats { struct _gstats {
struct atomic_u64_padded GSTAT_SOCK_BYTES_SENT; struct atomic64_padded GSTAT_SOCK_BYTES_SENT;
struct atomic_u64_padded GSTAT_SOCK_BYTES_RECEIVED; struct atomic64_padded GSTAT_SOCK_BYTES_RECEIVED;
struct atomic_u64_padded GSTAT_MEMORY_COMMITTED; struct atomic64_padded GSTAT_MEMORY_COMMITTED;
struct atomic_u64_padded GSTAT_MEMORY_RESERVED; struct atomic64_padded GSTAT_MEMORY_RESERVED;
struct atomic_u64_padded GSTAT_NUM_ARENAS; struct atomic64_padded GSTAT_NUM_ARENAS;
}; };
extern struct _gstats _g_gstats; extern struct _gstats _g_gstats;
#define gstat_set(name, value) atomic_u64_fetch_set(&_g_gstats.name.v, (value)) #define gstat_set(name, value) atomic64_fetch_set(&_g_gstats.name.v, (value))
#define gstat_add(name, value) atomic_u64_fetch_add_u64(&_g_gstats.name.v, (value)) #define gstat_add(name, value) atomic64_fetch_add(&_g_gstats.name.v, (value))
#define gstat_sub(name, value) atomic_u64_fetch_add_i64(&_g_gstats.name.v, -((i64)(value))) #define gstat_get(name) atomic64_fetch(&_g_gstats.name.v)
#define gstat_get(name) atomic_u64_fetch(&_g_gstats.name.v)
#else #else

View File

@ -205,7 +205,7 @@ struct host *host_alloc(u16 listen_port)
void host_release(struct host *host) void host_release(struct host *host)
{ {
atomic_i32_fetch_set(&host->receiver_thread_shutdown_flag.v, 1); atomic32_fetch_set(&host->receiver_thread_shutdown_flag.v, 1);
sock_wake(host->sock); sock_wake(host->sock);
while (!sys_thread_try_release(host->receiver_thread, 0.001f)) { while (!sys_thread_try_release(host->receiver_thread, 0.001f)) {
sock_wake(host->sock); sock_wake(host->sock);
@ -1072,11 +1072,11 @@ INTERNAL SYS_THREAD_DEF(host_receiver_thread_entry_point, arg)
socks.socks = &host->sock; socks.socks = &host->sock;
socks.count = 1; socks.count = 1;
struct atomic_i32 *shutdown = &host->receiver_thread_shutdown_flag.v; struct atomic32 *shutdown = &host->receiver_thread_shutdown_flag.v;
while (!atomic_i32_fetch(shutdown)) { while (!atomic32_fetch(shutdown)) {
struct sock *sock = sock_wait_for_available_read(socks, F32_INFINITY); struct sock *sock = sock_wait_for_available_read(socks, F32_INFINITY);
struct sock_read_result res; struct sock_read_result res;
while (!atomic_i32_fetch(shutdown) && sock && (res = sock_read(sock, read_buff)).valid) { while (!atomic32_fetch(shutdown) && sock && (res = sock_read(sock, read_buff)).valid) {
struct sock_address address = res.address; struct sock_address address = res.address;
struct string data = res.data; struct string data = res.data;
if (data.len > 0) { if (data.len > 0) {

View File

@ -98,7 +98,7 @@ struct host {
u64 bytes_received; u64 bytes_received;
u64 bytes_sent; u64 bytes_sent;
struct atomic_i32_padded receiver_thread_shutdown_flag; struct atomic32_padded receiver_thread_shutdown_flag;
struct sys_thread *receiver_thread; struct sys_thread *receiver_thread;
}; };

View File

@ -47,12 +47,12 @@ INTERNAL BOOL CALLBACK enum_func(HMODULE module, LPCWSTR type, LPCWSTR wstr_entr
struct string _incbin_get(struct _incbin_rc_resource *inc) struct string _incbin_get(struct _incbin_rc_resource *inc)
{ {
enum _incbin_state state = atomic_i32_fetch(&inc->state); enum _incbin_state state = atomic32_fetch(&inc->state);
if (state != INCBIN_STATE_SEARCHED) { if (state != INCBIN_STATE_SEARCHED) {
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
if (state == INCBIN_STATE_UNSEARCHED) { if (state == INCBIN_STATE_UNSEARCHED) {
enum _incbin_state v = atomic_i32_fetch_test_set(&inc->state, state, INCBIN_STATE_SEARCHING); enum _incbin_state v = atomic32_fetch_test_set(&inc->state, state, INCBIN_STATE_SEARCHING);
if (v == state) { if (v == state) {
/* Search RC file for the resource name */ /* Search RC file for the resource name */
struct string name_lower = string_lower(scratch.arena, inc->rc_name); struct string name_lower = string_lower(scratch.arena, inc->rc_name);
@ -65,7 +65,7 @@ struct string _incbin_get(struct _incbin_rc_resource *inc)
} }
inc->data = params.data; inc->data = params.data;
state = INCBIN_STATE_SEARCHED; state = INCBIN_STATE_SEARCHED;
atomic_i32_fetch_set(&inc->state, state); atomic32_fetch_set(&inc->state, state);
} else { } else {
state = v; state = v;
} }
@ -74,7 +74,7 @@ struct string _incbin_get(struct _incbin_rc_resource *inc)
/* Spin while another thread searches */ /* Spin while another thread searches */
while (state != INCBIN_STATE_SEARCHED) { while (state != INCBIN_STATE_SEARCHED) {
ix_pause(); ix_pause();
state = atomic_i32_fetch(&inc->state); state = atomic32_fetch(&inc->state);
} }
scratch_end(scratch); scratch_end(scratch);

View File

@ -22,7 +22,7 @@ enum _incbin_state {
}; };
struct _incbin_rc_resource { struct _incbin_rc_resource {
struct atomic_i32 state; struct atomic32 state;
struct string rc_name; struct string rc_name;
struct string data; struct string data;
}; };

View File

@ -15,7 +15,7 @@ struct log_event_callback {
* ========================== */ * ========================== */
GLOBAL struct { GLOBAL struct {
struct atomic_i32 initialized; struct atomic32 initialized;
struct snc_mutex callbacks_mutex; struct snc_mutex callbacks_mutex;
struct arena *callbacks_arena; struct arena *callbacks_arena;
@ -74,7 +74,7 @@ void log_startup(struct string logfile_path)
G.file_valid = 1; G.file_valid = 1;
} }
} }
atomic_i32_fetch_set(&G.initialized, 1); atomic32_fetch_set(&G.initialized, 1);
} }
/* ========================== * /* ========================== *
@ -83,7 +83,7 @@ void log_startup(struct string logfile_path)
void log_register_callback(log_event_callback_func *func, i32 level) void log_register_callback(log_event_callback_func *func, i32 level)
{ {
if (!atomic_i32_fetch(&G.initialized)) { return; } if (!atomic32_fetch(&G.initialized)) { return; }
struct snc_lock lock = snc_lock_e(&G.callbacks_mutex); struct snc_lock lock = snc_lock_e(&G.callbacks_mutex);
{ {
struct log_event_callback *callback = arena_push(G.callbacks_arena, struct log_event_callback); struct log_event_callback *callback = arena_push(G.callbacks_arena, struct log_event_callback);
@ -106,7 +106,7 @@ void log_register_callback(log_event_callback_func *func, i32 level)
INTERNAL void append_to_logfile(struct string msg) INTERNAL void append_to_logfile(struct string msg)
{ {
__prof; __prof;
if (!atomic_i32_fetch(&G.initialized)) { return; } if (!atomic32_fetch(&G.initialized)) { return; }
if (G.file_valid) { if (G.file_valid) {
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
@ -120,7 +120,7 @@ INTERNAL void append_to_logfile(struct string msg)
* writing to log file. */ * writing to log file. */
void _log_panic(struct string msg) void _log_panic(struct string msg)
{ {
if (!atomic_i32_fetch(&G.initialized)) { return; } if (!atomic32_fetch(&G.initialized)) { return; }
if (G.file_valid) { if (G.file_valid) {
sys_file_write(G.file, LIT("******** PANICKING ********\n")); sys_file_write(G.file, LIT("******** PANICKING ********\n"));
@ -136,7 +136,7 @@ void _log(i32 level, struct string msg)
#endif #endif
{ {
__prof; __prof;
if (!atomic_i32_fetch(&G.initialized)) { return; } if (!atomic32_fetch(&G.initialized)) { return; }
if (level < 0 || level >= LOG_LEVEL_COUNT) { if (level < 0 || level >= LOG_LEVEL_COUNT) {
sys_panic(LIT("Invalid log level")); sys_panic(LIT("Invalid log level"));
@ -232,7 +232,7 @@ void _logfv(i32 level, struct string file, u32 line, struct string fmt, va_list
void _logfv(i32 level, struct string fmt, va_list args) void _logfv(i32 level, struct string fmt, va_list args)
#endif #endif
{ {
if (!atomic_i32_fetch(&G.initialized)) { return; } if (!atomic32_fetch(&G.initialized)) { return; }
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
struct string msg = string_formatv(scratch.arena, fmt, args); struct string msg = string_formatv(scratch.arena, fmt, args);
#if LOG_INCLUDE_SOURCE_LOCATION #if LOG_INCLUDE_SOURCE_LOCATION
@ -249,7 +249,7 @@ void _logf(i32 level, struct string file, u32 line, struct string fmt, ...)
void _logf(i32 level, struct string fmt, ...) void _logf(i32 level, struct string fmt, ...)
#endif #endif
{ {
if (!atomic_i32_fetch(&G.initialized)) { return; } if (!atomic32_fetch(&G.initialized)) { return; }
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
#if LOG_INCLUDE_SOURCE_LOCATION #if LOG_INCLUDE_SOURCE_LOCATION

View File

@ -38,7 +38,7 @@ struct wasapi_buffer {
}; };
GLOBAL struct { GLOBAL struct {
struct atomic_i32 shutdown; struct atomic32 shutdown;
IAudioClient *client; IAudioClient *client;
HANDLE event; HANDLE event;
IAudioRenderClient *playback; IAudioRenderClient *playback;
@ -69,7 +69,7 @@ struct playback_startup_receipt playback_startup(struct mixer_startup_receipt *m
INTERNAL SYS_EXIT_FUNC(playback_shutdown) INTERNAL SYS_EXIT_FUNC(playback_shutdown)
{ {
__prof; __prof;
atomic_i32_fetch_set(&G.shutdown, 1); atomic32_fetch_set(&G.shutdown, 1);
sys_thread_wait_release(G.playback_scheduler_thread); sys_thread_wait_release(G.playback_scheduler_thread);
} }
@ -248,7 +248,7 @@ INTERNAL SYS_THREAD_DEF(playback_scheduler_entry, _)
/* FIXME: If playback fails at any point and mixer stops advancing, we /* FIXME: If playback fails at any point and mixer stops advancing, we
* need to halt mixer to prevent memory leak when sounds are played. */ * need to halt mixer to prevent memory leak when sounds are played. */
/* TODO: Signal counter that running job waits on, rather than scheduling job manually */ /* TODO: Signal counter that running job waits on, rather than scheduling job manually */
while (!atomic_i32_fetch(&G.shutdown)) { while (!atomic32_fetch(&G.shutdown)) {
{ {
__profn("Wasapi wait"); __profn("Wasapi wait");
WaitForSingleObject(G.event, INFINITE); WaitForSingleObject(G.event, INFINITE);

View File

@ -27,7 +27,7 @@ GLOBAL struct {
struct sys_thread *resource_watch_dispatch_thread; struct sys_thread *resource_watch_dispatch_thread;
struct sys_watch *watch; struct sys_watch *watch;
struct atomic_i32 watch_shutdown; struct atomic32 watch_shutdown;
struct snc_mutex watch_dispatcher_mutex; struct snc_mutex watch_dispatcher_mutex;
struct arena *watch_dispatcher_info_arena; struct arena *watch_dispatcher_info_arena;
@ -157,7 +157,7 @@ void resource_close(struct resource *res_ptr)
INTERNAL SYS_EXIT_FUNC(resource_shutdown) INTERNAL SYS_EXIT_FUNC(resource_shutdown)
{ {
__prof; __prof;
atomic_i32_fetch_set(&G.watch_shutdown, 1); atomic32_fetch_set(&G.watch_shutdown, 1);
{ {
struct snc_lock lock = snc_lock_e(&G.watch_dispatcher_mutex); struct snc_lock lock = snc_lock_e(&G.watch_dispatcher_mutex);
@ -188,10 +188,10 @@ INTERNAL SYS_THREAD_DEF(resource_watch_monitor_thread_entry_point, _)
(UNUSED)_; (UNUSED)_;
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
while (!atomic_i32_fetch(&G.watch_shutdown)) { while (!atomic32_fetch(&G.watch_shutdown)) {
struct arena_temp temp = arena_temp_begin(scratch.arena); struct arena_temp temp = arena_temp_begin(scratch.arena);
struct sys_watch_info_list res = sys_watch_wait(temp.arena, G.watch); struct sys_watch_info_list res = sys_watch_wait(temp.arena, G.watch);
if (res.first && !atomic_i32_fetch(&G.watch_shutdown)) { if (res.first && !atomic32_fetch(&G.watch_shutdown)) {
struct snc_lock lock = snc_lock_e(&G.watch_dispatcher_mutex); struct snc_lock lock = snc_lock_e(&G.watch_dispatcher_mutex);
{ {
struct sys_watch_info_list list_part = sys_watch_info_copy(G.watch_dispatcher_info_arena, res); struct sys_watch_info_list list_part = sys_watch_info_copy(G.watch_dispatcher_info_arena, res);
@ -239,9 +239,9 @@ INTERNAL SYS_THREAD_DEF(resource_watch_dispatcher_thread_entry_point, _)
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
struct snc_lock watch_dispatcher_lock = snc_lock_e(&G.watch_dispatcher_mutex); struct snc_lock watch_dispatcher_lock = snc_lock_e(&G.watch_dispatcher_mutex);
while (!atomic_i32_fetch(&G.watch_shutdown)) { while (!atomic32_fetch(&G.watch_shutdown)) {
snc_cv_wait(&G.watch_dispatcher_cv, &watch_dispatcher_lock); snc_cv_wait(&G.watch_dispatcher_cv, &watch_dispatcher_lock);
if (!atomic_i32_fetch(&G.watch_shutdown) && G.watch_dispatcher_info_arena->pos > 0) { if (!atomic32_fetch(&G.watch_shutdown) && G.watch_dispatcher_info_arena->pos > 0) {
__profn("Dispatch resource watch callbacks"); __profn("Dispatch resource watch callbacks");
/* Unlock and sleep a bit so duplicate events pile up */ /* Unlock and sleep a bit so duplicate events pile up */
{ {
@ -250,7 +250,7 @@ INTERNAL SYS_THREAD_DEF(resource_watch_dispatcher_thread_entry_point, _)
sys_wait(0, 0, 0, NS_FROM_SECONDS(WATCH_DISPATCHER_DELAY_SECONDS)); sys_wait(0, 0, 0, NS_FROM_SECONDS(WATCH_DISPATCHER_DELAY_SECONDS));
watch_dispatcher_lock = snc_lock_e(&G.watch_dispatcher_mutex); watch_dispatcher_lock = snc_lock_e(&G.watch_dispatcher_mutex);
} }
if (!atomic_i32_fetch(&G.watch_shutdown)) { if (!atomic32_fetch(&G.watch_shutdown)) {
struct arena_temp temp = arena_temp_begin(scratch.arena); struct arena_temp temp = arena_temp_begin(scratch.arena);
/* Pull watch info from queue */ /* Pull watch info from queue */

View File

@ -16,15 +16,15 @@ struct snc_lock snc_lock_spin_e(struct snc_mutex *m, i32 spin)
i32 spin_cnt = 0; i32 spin_cnt = 0;
while (!locked) { while (!locked) {
++spin_cnt; ++spin_cnt;
u32 v = atomic_u32_fetch_test_set(&m->v, 0, 0x80000000); u32 v = atomic32_fetch_test_set(&m->v, 0, 0x80000000);
if (v == 0) { if (v == 0) {
locked = 1; locked = 1;
} else if (v == 0x40000000) { } else if (v == 0x40000000) {
/* Lock has pending bit set, try to lock */ /* Lock has pending bit set, try to lock */
u32 swp = atomic_u32_fetch_test_set(&m->v, v, 0x80000000); u32 swp = atomic32_fetch_test_set(&m->v, v, 0x80000000);
while (swp != v && swp == 0x40000000) { while (swp != v && swp == 0x40000000) {
v = swp; v = swp;
swp = atomic_u32_fetch_test_set(&m->v, v, 0x80000000); swp = atomic32_fetch_test_set(&m->v, v, 0x80000000);
} }
v = swp; v = swp;
if (v == 0x40000000) { if (v == 0x40000000) {
@ -33,10 +33,10 @@ struct snc_lock snc_lock_spin_e(struct snc_mutex *m, i32 spin)
} }
if (!locked && (v & 0xC0000000) == 0) { if (!locked && (v & 0xC0000000) == 0) {
/* Lock has shared lockers and no pending waiter, set pending bit */ /* Lock has shared lockers and no pending waiter, set pending bit */
u32 swp = atomic_u32_fetch_test_set(&m->v, v, v | 0x40000000); u32 swp = atomic32_fetch_test_set(&m->v, v, v | 0x40000000);
while (swp != v && (swp & 0xC0000000) == 0 && swp != 0) { while (swp != v && (swp & 0xC0000000) == 0 && swp != 0) {
v = swp; v = swp;
swp = atomic_u32_fetch_test_set(&m->v, v, v | 0x40000000); swp = atomic32_fetch_test_set(&m->v, v, v | 0x40000000);
} }
v = swp; v = swp;
} }
@ -52,7 +52,7 @@ struct snc_lock snc_lock_spin_e(struct snc_mutex *m, i32 spin)
} }
#if RTC #if RTC
atomic_i32_fetch_set(&m->exclusive_fiber_id, sys_current_fiber_id()); atomic32_fetch_set(&m->exclusive_fiber_id, sys_current_fiber_id());
#endif #endif
struct snc_lock lock = ZI; struct snc_lock lock = ZI;
@ -67,10 +67,10 @@ struct snc_lock snc_lock_spin_s(struct snc_mutex *m, i32 spin)
i32 spin_cnt = 0; i32 spin_cnt = 0;
while (!locked) { while (!locked) {
++spin_cnt; ++spin_cnt;
u32 v = atomic_u32_fetch(&m->v); u32 v = atomic32_fetch(&m->v);
while (!locked && (v & 0xC0000000) == 0) { while (!locked && (v & 0xC0000000) == 0) {
/* Lock has no exclusive or pending exclusive lock, increment shared count */ /* Lock has no exclusive or pending exclusive lock, increment shared count */
u32 swp = atomic_u32_fetch_test_set(&m->v, v, v + 1); u32 swp = atomic32_fetch_test_set(&m->v, v, v + 1);
if (v == swp) { if (v == swp) {
locked = 1; locked = 1;
} else { } else {
@ -107,11 +107,11 @@ void snc_unlock(struct snc_lock *l)
struct snc_mutex *m = l->mutex; struct snc_mutex *m = l->mutex;
if (l->exclusive) { if (l->exclusive) {
#if RTC #if RTC
atomic_i32_fetch_set(&m->exclusive_fiber_id, 0); atomic32_fetch_set(&m->exclusive_fiber_id, 0);
#endif #endif
atomic_u32_fetch_set(&m->v, 0); atomic32_fetch_set(&m->v, 0);
} else { } else {
atomic_u32_fetch_add_i32(&m->v, -1); atomic32_fetch_add(&m->v, -1);
} }
sys_wake(&m->v, I32_MAX); sys_wake(&m->v, I32_MAX);
MEMZERO_STRUCT(l); MEMZERO_STRUCT(l);
@ -128,7 +128,7 @@ void snc_cv_wait(struct snc_cv *cv, struct snc_lock *l)
void snc_cv_wait_time(struct snc_cv *cv, struct snc_lock *l, i64 timeout_ns) void snc_cv_wait_time(struct snc_cv *cv, struct snc_lock *l, i64 timeout_ns)
{ {
u64 old_wake_gen = atomic_u64_fetch(&cv->wake_gen); u64 old_wake_gen = atomic64_fetch(&cv->wake_gen);
struct snc_mutex *mutex = l->mutex; struct snc_mutex *mutex = l->mutex;
b32 exclusive = l->exclusive; b32 exclusive = l->exclusive;
{ {
@ -146,7 +146,7 @@ void snc_cv_wait_time(struct snc_cv *cv, struct snc_lock *l, i64 timeout_ns)
void snc_cv_signal(struct snc_cv *cv, i32 count) void snc_cv_signal(struct snc_cv *cv, i32 count)
{ {
atomic_u64_fetch_add_u64(&cv->wake_gen, 1); atomic64_fetch_add(&cv->wake_gen, 1);
sys_wake(&cv->wake_gen, count); sys_wake(&cv->wake_gen, count);
} }
@ -156,7 +156,7 @@ void snc_cv_signal(struct snc_cv *cv, i32 count)
void snc_counter_add(struct snc_counter *counter, i64 x) void snc_counter_add(struct snc_counter *counter, i64 x)
{ {
i64 old_v = atomic_i64_fetch_add(&counter->v, x); i64 old_v = atomic64_fetch_add(&counter->v, x);
i64 new_v = old_v + x; i64 new_v = old_v + x;
if (old_v > 0 && new_v <= 0) { if (old_v > 0 && new_v <= 0) {
sys_wake(&counter->v, I32_MAX); sys_wake(&counter->v, I32_MAX);
@ -165,9 +165,9 @@ void snc_counter_add(struct snc_counter *counter, i64 x)
void snc_counter_wait(struct snc_counter *counter) void snc_counter_wait(struct snc_counter *counter)
{ {
i64 v = atomic_i64_fetch(&counter->v); i64 v = atomic64_fetch(&counter->v);
while (v > 0) { while (v > 0) {
sys_wait(&counter->v, &v, sizeof(v), I64_MAX); sys_wait(&counter->v, &v, sizeof(v), I64_MAX);
v = atomic_i64_fetch(&counter->v); v = atomic64_fetch(&counter->v);
} }
} }

View File

@ -15,10 +15,10 @@ struct alignas(64) snc_mutex {
* Bit 30 = Exclusive lock is pending * Bit 30 = Exclusive lock is pending
* Bit 0-30 = Shared locks count * Bit 0-30 = Shared locks count
*/ */
struct atomic_u32 v; struct atomic32 v;
#if RTC #if RTC
struct atomic_i32 exclusive_fiber_id; struct atomic32 exclusive_fiber_id;
u8 _pad[56]; u8 _pad[56];
#else #else
u8 _pad[60]; u8 _pad[60];
@ -46,7 +46,7 @@ void snc_unlock(struct snc_lock *lock);
* ========================== */ * ========================== */
struct alignas(64) snc_cv { struct alignas(64) snc_cv {
struct atomic_u64 wake_gen; struct atomic64 wake_gen;
u8 _pad[56]; u8 _pad[56];
}; };
STATIC_ASSERT(sizeof(struct snc_cv) == 64); /* Padding validation */ STATIC_ASSERT(sizeof(struct snc_cv) == 64); /* Padding validation */
@ -61,7 +61,7 @@ void snc_cv_signal(struct snc_cv *cv, i32 count);
* ========================== */ * ========================== */
struct alignas(64) snc_counter { struct alignas(64) snc_counter {
struct atomic_i64 v; struct atomic64 v;
u8 _pad[56]; u8 _pad[56];
}; };
STATIC_ASSERT(sizeof(struct snc_counter) == 64); /* Padding validation */ STATIC_ASSERT(sizeof(struct snc_counter) == 64); /* Padding validation */

View File

@ -66,8 +66,8 @@ struct cache_entry_hash {
struct cache_entry { struct cache_entry {
enum cache_entry_kind kind; enum cache_entry_kind kind;
struct cache_entry_hash hash; struct cache_entry_hash hash;
struct atomic_i32 state; struct atomic32 state;
struct atomic_u64_padded refcount_struct; /* Cast fetched result to `cache_refcount` */ struct atomic64_padded refcount_struct; /* Cast fetched result to `cache_refcount` */
/* Allocated data */ /* Allocated data */
/* NOTE: This data is finalized once entry state = loaded */ /* NOTE: This data is finalized once entry state = loaded */
@ -85,7 +85,7 @@ struct cache_entry {
struct cache_entry *next_free; struct cache_entry *next_free;
#if RESOURCE_RELOADING #if RESOURCE_RELOADING
struct atomic_i32 out_of_date; /* Has the resource changed since this entry was loaded */ struct atomic32 out_of_date; /* Has the resource changed since this entry was loaded */
#endif #endif
}; };
@ -96,7 +96,7 @@ struct cache_bin {
}; };
struct cache { struct cache {
struct atomic_u64_padded memory_usage; struct atomic64_padded memory_usage;
struct arena *arena; struct arena *arena;
struct cache_bin *bins; struct cache_bin *bins;
struct snc_mutex entry_pool_mutex; struct snc_mutex entry_pool_mutex;
@ -151,7 +151,7 @@ GLOBAL struct {
struct sprite_scope *first_free_scope; struct sprite_scope *first_free_scope;
/* Evictor */ /* Evictor */
struct atomic_i32_padded evictor_cycle; struct atomic32_padded evictor_cycle;
struct snc_counter shutdown_counter; struct snc_counter shutdown_counter;
b32 evictor_scheduler_shutdown; b32 evictor_scheduler_shutdown;
struct snc_mutex evictor_scheduler_mutex; struct snc_mutex evictor_scheduler_mutex;
@ -334,7 +334,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
struct cache_entry *e = ref.e; struct cache_entry *e = ref.e;
atomic_i32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING); atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING);
struct string path = tag.path; struct string path = tag.path;
logf_info("Loading sprite texture [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path)); logf_info("Loading sprite texture [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path));
@ -376,7 +376,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
} }
arena_set_readonly(e->arena); arena_set_readonly(e->arena);
e->memory_usage = e->arena->committed + memory_size; e->memory_usage = e->arena->committed + memory_size;
atomic_u64_fetch_add_u64(&G.cache.memory_usage.v, e->memory_usage); atomic64_fetch_add(&G.cache.memory_usage.v, e->memory_usage);
if (success) { if (success) {
logf_success("Loaded sprite texture [%F] \"%F\" in %F seconds (cache size: %F bytes).", logf_success("Loaded sprite texture [%F] \"%F\" in %F seconds (cache size: %F bytes).",
@ -386,7 +386,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
FMT_UINT(e->memory_usage)); FMT_UINT(e->memory_usage));
} }
atomic_i32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED); atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED);
#if RESOURCE_RELOADING #if RESOURCE_RELOADING
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT]; struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
@ -394,7 +394,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
{ {
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) { for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
if (old_entry != e && old_entry->hash.v == e->hash.v) { if (old_entry != e && old_entry->hash.v == e->hash.v) {
atomic_i32_fetch_set(&old_entry->out_of_date, 1); atomic32_fetch_set(&old_entry->out_of_date, 1);
} }
} }
e->load_time_ns = sys_time_ns(); e->load_time_ns = sys_time_ns();
@ -657,7 +657,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
struct cache_entry *e = ref.e; struct cache_entry *e = ref.e;
atomic_i32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING); atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING);
struct string path = tag.path; struct string path = tag.path;
logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path)); logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path));
@ -697,7 +697,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
} }
arena_set_readonly(e->arena); arena_set_readonly(e->arena);
e->memory_usage = e->arena->committed; e->memory_usage = e->arena->committed;
atomic_u64_fetch_add_u64(&G.cache.memory_usage.v, e->memory_usage); atomic64_fetch_add(&G.cache.memory_usage.v, e->memory_usage);
if (success) { if (success) {
logf_success("Loaded sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).", logf_success("Loaded sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).",
@ -707,7 +707,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
FMT_UINT(e->memory_usage)); FMT_UINT(e->memory_usage));
} }
atomic_i32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED); atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED);
#if RESOURCE_RELOADING #if RESOURCE_RELOADING
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT]; struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
@ -715,7 +715,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
{ {
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) { for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
if (old_entry != e && old_entry->hash.v == e->hash.v) { if (old_entry != e && old_entry->hash.v == e->hash.v) {
atomic_i32_fetch_set(&old_entry->out_of_date, 1); atomic32_fetch_set(&old_entry->out_of_date, 1);
} }
} }
e->load_time_ns = sys_time_ns(); e->load_time_ns = sys_time_ns();
@ -732,14 +732,14 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
INTERNAL void refcount_add(struct cache_entry *e, i32 amount) INTERNAL void refcount_add(struct cache_entry *e, i32 amount)
{ {
i32 evictor_cycle = atomic_i32_fetch(&G.evictor_cycle.v); i32 evictor_cycle = atomic32_fetch(&G.evictor_cycle.v);
struct atomic_u64 *refcount_atomic = &e->refcount_struct.v; struct atomic64 *refcount_atomic = &e->refcount_struct.v;
u64 old_refcount_uncast = atomic_u64_fetch(refcount_atomic); u64 old_refcount_uncast = atomic64_fetch(refcount_atomic);
do { do {
struct cache_refcount new_refcount = *(struct cache_refcount *)&old_refcount_uncast; struct cache_refcount new_refcount = *(struct cache_refcount *)&old_refcount_uncast;
new_refcount.count += amount; new_refcount.count += amount;
new_refcount.last_ref_cycle = evictor_cycle; new_refcount.last_ref_cycle = evictor_cycle;
u64 v = atomic_u64_fetch_test_set(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount); u64 v = atomic64_fetch_test_set(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
if (v != old_refcount_uncast) { if (v != old_refcount_uncast) {
old_refcount_uncast = v; old_refcount_uncast = v;
} else { } else {
@ -861,7 +861,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope,
enum cache_entry_state match_state = CACHE_ENTRY_STATE_NONE; enum cache_entry_state match_state = CACHE_ENTRY_STATE_NONE;
for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) { for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) {
if (entry->hash.v == hash.v) { if (entry->hash.v == hash.v) {
enum cache_entry_state entry_state = atomic_i32_fetch(&entry->state); enum cache_entry_state entry_state = atomic32_fetch(&entry->state);
if (!match || entry_state > match_state || (entry_state == CACHE_ENTRY_STATE_LOADED && match_state == CACHE_ENTRY_STATE_LOADED && entry->load_time_ns > match->load_time_ns)) { if (!match || entry_state > match_state || (entry_state == CACHE_ENTRY_STATE_LOADED && match_state == CACHE_ENTRY_STATE_LOADED && entry->load_time_ns > match->load_time_ns)) {
match = entry; match = entry;
match_state = entry_state; match_state = entry_state;
@ -975,7 +975,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind, 0); struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind, 0);
struct cache_ref ref = scope_ref->ref; struct cache_ref ref = scope_ref->ref;
enum cache_entry_state state = atomic_i32_fetch(&ref.e->state); enum cache_entry_state state = atomic32_fetch(&ref.e->state);
if (state == CACHE_ENTRY_STATE_LOADED) { if (state == CACHE_ENTRY_STATE_LOADED) {
switch (kind) { switch (kind) {
case CACHE_ENTRY_KIND_TEXTURE: { res = ref.e->texture; } break; case CACHE_ENTRY_KIND_TEXTURE: { res = ref.e->texture; } break;
@ -984,7 +984,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
} }
} else if (state == CACHE_ENTRY_STATE_NONE) { } else if (state == CACHE_ENTRY_STATE_NONE) {
/* If entry is new, load texture */ /* If entry is new, load texture */
if (atomic_i32_fetch_test_set(&ref.e->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) { if (atomic32_fetch_test_set(&ref.e->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) {
/* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */ /* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
if (await) { if (await) {
switch (kind) { switch (kind) {
@ -1007,7 +1007,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
/* Spinlock until result is ready */ /* Spinlock until result is ready */
if (await && state != CACHE_ENTRY_STATE_LOADED) { if (await && state != CACHE_ENTRY_STATE_LOADED) {
while (atomic_i32_fetch(&ref.e->state) != CACHE_ENTRY_STATE_LOADED) { while (atomic32_fetch(&ref.e->state) != CACHE_ENTRY_STATE_LOADED) {
ix_pause(); ix_pause();
} }
} }
@ -1231,10 +1231,10 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
u64 evict_array_count = 0; u64 evict_array_count = 0;
struct evict_node *evict_array = arena_push_dry(scratch.arena, struct evict_node); struct evict_node *evict_array = arena_push_dry(scratch.arena, struct evict_node);
{ {
i32 cur_cycle = atomic_i32_fetch(&G.evictor_cycle.v); i32 cur_cycle = atomic32_fetch(&G.evictor_cycle.v);
/* Scan for evictable nodes */ /* Scan for evictable nodes */
b32 cache_over_budget_threshold = atomic_u64_fetch(&G.cache.memory_usage.v) > CACHE_MEMORY_BUDGET_THRESHOLD; b32 cache_over_budget_threshold = atomic64_fetch(&G.cache.memory_usage.v) > (i64)CACHE_MEMORY_BUDGET_THRESHOLD;
if (cache_over_budget_threshold || RESOURCE_RELOADING) { if (cache_over_budget_threshold || RESOURCE_RELOADING) {
__profn("Evictor scan"); __profn("Evictor scan");
for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) { for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) {
@ -1243,12 +1243,12 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
{ {
struct cache_entry *n = bin->first; struct cache_entry *n = bin->first;
while (n) { while (n) {
u64 refcount_uncast = atomic_u64_fetch(&n->refcount_struct.v); u64 refcount_uncast = atomic64_fetch(&n->refcount_struct.v);
struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast; struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
if (refcount.count <= 0) { if (refcount.count <= 0) {
/* Add node to evict list */ /* Add node to evict list */
#if RESOURCE_RELOADING #if RESOURCE_RELOADING
b32 is_out_of_date = atomic_i32_fetch(&n->out_of_date); b32 is_out_of_date = atomic32_fetch(&n->out_of_date);
#else #else
b32 is_out_of_date = 0; b32 is_out_of_date = 0;
#endif #endif
@ -1291,10 +1291,10 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
struct cache_bin *bin = en->cache_bin; struct cache_bin *bin = en->cache_bin;
struct cache_entry *entry = en->cache_entry; struct cache_entry *entry = en->cache_entry;
i32 last_ref_cycle = en->last_ref_cycle; i32 last_ref_cycle = en->last_ref_cycle;
b32 cache_over_budget_target = atomic_u64_fetch(&G.cache.memory_usage.v) > CACHE_MEMORY_BUDGET_TARGET; b32 cache_over_budget_target = atomic64_fetch(&G.cache.memory_usage.v) > (i64)CACHE_MEMORY_BUDGET_TARGET;
struct snc_lock bin_lock = snc_lock_e(&bin->mutex); struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
{ {
u64 refcount_uncast = atomic_u64_fetch(&entry->refcount_struct.v); u64 refcount_uncast = atomic64_fetch(&entry->refcount_struct.v);
struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast; struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle)) { if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
/* Cache node has been referenced since scan, skip node. */ /* Cache node has been referenced since scan, skip node. */
@ -1313,7 +1313,7 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
bin->last = prev; bin->last = prev;
} }
atomic_u64_fetch_add_i64(&G.cache.memory_usage.v, -((i64)entry->memory_usage)); atomic64_fetch_add(&G.cache.memory_usage.v, -((i64)entry->memory_usage));
/* Add to evicted list */ /* Add to evicted list */
en->next_evicted = first_evicted; en->next_evicted = first_evicted;
@ -1353,7 +1353,7 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
} }
} }
} }
atomic_i32_fetch_add(&G.evictor_cycle.v, 1); atomic32_fetch_add(&G.evictor_cycle.v, 1);
scratch_end(scratch); scratch_end(scratch);
} }

View File

@ -53,8 +53,8 @@
#define WAKE_ALL_THRESHOLD 24 #define WAKE_ALL_THRESHOLD 24
struct ticket_mutex { struct ticket_mutex {
struct atomic_i64_padded ticket; struct atomic64_padded ticket;
struct atomic_i64_padded serving; struct atomic64_padded serving;
}; };
struct win32_thread { struct win32_thread {
@ -114,7 +114,7 @@ struct win32_window {
struct sys_thread *event_thread; struct sys_thread *event_thread;
struct sys_thread *present_thread; struct sys_thread *present_thread;
struct atomic_i32 shutdown; struct atomic32 shutdown;
struct win32_window *next_free; struct win32_window *next_free;
}; };
@ -164,7 +164,7 @@ struct alignas(64) fiber {
/* ==================================================== */ /* ==================================================== */
i16 id; /* 02 bytes */ i16 id; /* 02 bytes */
i16 parent_id; /* 02 bytes */ i16 parent_id; /* 02 bytes */
struct atomic_i32 wake_lock; /* 04 bytes */ struct atomic32 wake_lock; /* 04 bytes */
/* ==================================================== */ /* ==================================================== */
u64 wait_addr; /* 08 bytes */ u64 wait_addr; /* 08 bytes */
/* ==================================================== */ /* ==================================================== */
@ -239,8 +239,8 @@ struct alignas(64) job_pool {
struct job_queue job_queues[NUM_SYS_PRIORITIES]; struct job_queue job_queues[NUM_SYS_PRIORITIES];
/* Workers */ /* Workers */
struct atomic_i32_padded workers_shutdown; struct atomic32_padded workers_shutdown;
struct atomic_i64_padded num_jobs_in_queue; struct atomic64_padded num_jobs_in_queue;
struct snc_mutex workers_wake_mutex; struct snc_mutex workers_wake_mutex;
struct snc_cv workers_wake_cv; struct snc_cv workers_wake_cv;
@ -261,12 +261,12 @@ GLOBAL struct {
i64 timer_start_qpc; i64 timer_start_qpc;
i64 ns_per_qpc; i64 ns_per_qpc;
u32 main_thread_id; u32 main_thread_id;
struct atomic_i32 shutdown; struct atomic32 shutdown;
wchar_t cmdline_args_wstr[8192]; wchar_t cmdline_args_wstr[8192];
/* Panic */ /* Panic */
struct atomic_i32 panicking; struct atomic32 panicking;
wchar_t panic_wstr[4096]; wchar_t panic_wstr[4096];
HANDLE panic_event; HANDLE panic_event;
@ -294,12 +294,12 @@ GLOBAL struct {
/* Exit funcs */ /* Exit funcs */
struct atomic_i32 num_exit_funcs; struct atomic32 num_exit_funcs;
sys_exit_func *exit_funcs[MAX_EXIT_FUNCS]; sys_exit_func *exit_funcs[MAX_EXIT_FUNCS];
/* Scheduler */ /* Scheduler */
struct atomic_i64_padded current_scheduler_cycle; struct atomic64_padded current_scheduler_cycle;
struct atomic_i64_padded current_scheduler_cycle_period_ns; struct atomic64_padded current_scheduler_cycle_period_ns;
/* Fibers */ /* Fibers */
i16 num_fibers; i16 num_fibers;
@ -309,7 +309,7 @@ GLOBAL struct {
struct fiber fibers[SYS_MAX_FIBERS]; struct fiber fibers[SYS_MAX_FIBERS];
/* Wait lists */ /* Wait lists */
struct atomic_u64_padded waiter_wake_gen; struct atomic64_padded waiter_wake_gen;
struct ticket_mutex wait_lists_arena_lock; struct ticket_mutex wait_lists_arena_lock;
struct arena *wait_lists_arena; struct arena *wait_lists_arena;
@ -336,15 +336,15 @@ INTERNAL void job_fiber_yield(struct fiber *fiber, struct fiber *parent_fiber);
INTERNAL void tm_lock(struct ticket_mutex *tm) INTERNAL void tm_lock(struct ticket_mutex *tm)
{ {
i64 ticket = atomic_i64_fetch_add(&tm->ticket.v, 1); i64 ticket = atomic64_fetch_add(&tm->ticket.v, 1);
while (atomic_i64_fetch(&tm->serving.v) != ticket) { while (atomic64_fetch(&tm->serving.v) != ticket) {
ix_pause(); ix_pause();
} }
} }
INTERNAL void tm_unlock(struct ticket_mutex *tm) INTERNAL void tm_unlock(struct ticket_mutex *tm)
{ {
atomic_i64_fetch_add(&tm->serving.v, 1); atomic64_fetch_add(&tm->serving.v, 1);
} }
@ -357,7 +357,7 @@ INTERNAL void tm_unlock(struct ticket_mutex *tm)
void sys_on_exit(sys_exit_func *func) void sys_on_exit(sys_exit_func *func)
{ {
i32 index = atomic_i32_fetch_add(&G.num_exit_funcs, 1); i32 index = atomic32_fetch_add(&G.num_exit_funcs, 1);
if (index >= MAX_EXIT_FUNCS) { if (index >= MAX_EXIT_FUNCS) {
sys_panic(LIT("Maximum on exit functions registered")); sys_panic(LIT("Maximum on exit functions registered"));
} }
@ -374,7 +374,7 @@ void sys_on_exit(sys_exit_func *func)
i64 sys_current_scheduler_period_ns(void) i64 sys_current_scheduler_period_ns(void)
{ {
return atomic_i64_fetch(&G.current_scheduler_cycle_period_ns.v); return atomic64_fetch(&G.current_scheduler_cycle_period_ns.v);
} }
@ -564,7 +564,7 @@ INTERNAL void wake_fibers_locked(i32 num_fibers, struct fiber **fibers)
queue->first = info; queue->first = info;
} }
queue->last = info; queue->last = info;
atomic_i32_fetch_set(&fiber->wake_lock, 0); atomic32_fetch_set(&fiber->wake_lock, 0);
} }
tm_unlock(&queue->lock); tm_unlock(&queue->lock);
} }
@ -577,7 +577,7 @@ INTERNAL void wake_fibers_locked(i32 num_fibers, struct fiber **fibers)
struct job_pool *pool = &G.job_pools[pool_kind]; struct job_pool *pool = &G.job_pools[pool_kind];
struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex); struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
{ {
atomic_i64_fetch_add(&pool->num_jobs_in_queue.v, wake_count); atomic64_fetch_add(&pool->num_jobs_in_queue.v, wake_count);
snc_cv_signal(&pool->workers_wake_cv, wake_count); snc_cv_signal(&pool->workers_wake_cv, wake_count);
} }
snc_unlock(&lock); snc_unlock(&lock);
@ -611,7 +611,7 @@ INTERNAL void wake_address(void *addr, i32 count)
if (wait_addr_list) { if (wait_addr_list) {
fibers = arena_push_array_no_zero(scratch.arena, struct fiber *, wait_addr_list->num_waiters); fibers = arena_push_array_no_zero(scratch.arena, struct fiber *, wait_addr_list->num_waiters);
for (struct fiber *fiber = fiber_from_id(wait_addr_list->first_waiter); fiber && num_fibers < count; fiber = fiber_from_id(fiber->next_addr_waiter)) { for (struct fiber *fiber = fiber_from_id(wait_addr_list->first_waiter); fiber && num_fibers < count; fiber = fiber_from_id(fiber->next_addr_waiter)) {
if (atomic_i32_fetch_test_set(&fiber->wake_lock, 0, 1) == 0) { if (atomic32_fetch_test_set(&fiber->wake_lock, 0, 1) == 0) {
fibers[num_fibers] = fiber; fibers[num_fibers] = fiber;
++num_fibers; ++num_fibers;
} }
@ -660,7 +660,7 @@ INTERNAL void wake_time(u64 time)
/* Set waiter wake status & build fibers list */ /* Set waiter wake status & build fibers list */
fibers = arena_push_array_no_zero(scratch.arena, struct fiber *, wait_time_list->num_waiters); fibers = arena_push_array_no_zero(scratch.arena, struct fiber *, wait_time_list->num_waiters);
for (struct fiber *fiber = fiber_from_id(wait_time_list->first_waiter); fiber; fiber = fiber_from_id(fiber->next_time_waiter)) { for (struct fiber *fiber = fiber_from_id(wait_time_list->first_waiter); fiber; fiber = fiber_from_id(fiber->next_time_waiter)) {
if (atomic_i32_fetch_test_set(&fiber->wake_lock, 0, 1) == 0) { if (atomic32_fetch_test_set(&fiber->wake_lock, 0, 1) == 0) {
fibers[num_fibers] = fiber; fibers[num_fibers] = fiber;
++num_fibers; ++num_fibers;
} }
@ -841,7 +841,7 @@ void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_pool pool_kind,
/* Wake workers */ /* Wake workers */
struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex); struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
{ {
atomic_i64_fetch_add(&pool->num_jobs_in_queue.v, count); atomic64_fetch_add(&pool->num_jobs_in_queue.v, count);
snc_cv_signal(&pool->workers_wake_cv, count); snc_cv_signal(&pool->workers_wake_cv, count);
} }
snc_unlock(&lock); snc_unlock(&lock);
@ -963,7 +963,7 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
job_id = info->num_dispatched++; job_id = info->num_dispatched++;
if (job_id < info->count) { if (job_id < info->count) {
/* Pick job */ /* Pick job */
atomic_i64_fetch_add(&pool->num_jobs_in_queue.v, -1); atomic64_fetch_add(&pool->num_jobs_in_queue.v, -1);
job_priority = priority; job_priority = priority;
job_func = info->func; job_func = info->func;
job_sig = info->sig; job_sig = info->sig;
@ -975,7 +975,7 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
} }
} else { } else {
/* This job is to be resumed from a yield */ /* This job is to be resumed from a yield */
atomic_i64_fetch_add(&pool->num_jobs_in_queue.v, -1); atomic64_fetch_add(&pool->num_jobs_in_queue.v, -1);
job_fiber_id = info->fiber_id; job_fiber_id = info->fiber_id;
job_priority = priority; job_priority = priority;
job_id = info->num_dispatched; job_id = info->num_dispatched;
@ -1046,8 +1046,8 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
i64 wait_timeout_ns = yield.wait.timeout_ns; i64 wait_timeout_ns = yield.wait.timeout_ns;
i64 wait_time = 0; i64 wait_time = 0;
if (wait_timeout_ns > 0 && wait_timeout_ns < I64_MAX) { if (wait_timeout_ns > 0 && wait_timeout_ns < I64_MAX) {
u64 current_scheduler_cycle = atomic_i64_fetch(&G.current_scheduler_cycle.v); u64 current_scheduler_cycle = atomic64_fetch(&G.current_scheduler_cycle.v);
i64 current_scheduler_cycle_period_ns = atomic_i64_fetch(&G.current_scheduler_cycle_period_ns.v); i64 current_scheduler_cycle_period_ns = atomic64_fetch(&G.current_scheduler_cycle_period_ns.v);
wait_time = current_scheduler_cycle + max_i64((i64)((f64)wait_timeout_ns / (f64)current_scheduler_cycle_period_ns), 1); wait_time = current_scheduler_cycle + max_i64((i64)((f64)wait_timeout_ns / (f64)current_scheduler_cycle_period_ns), 1);
} }
@ -1071,7 +1071,7 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
} }
} }
if (wait_time != 0 && !cancel_wait) { if (wait_time != 0 && !cancel_wait) {
cancel_wait = wait_time <= atomic_i64_fetch(&G.current_scheduler_cycle.v); cancel_wait = wait_time <= atomic64_fetch(&G.current_scheduler_cycle.v);
} }
if (!cancel_wait) { if (!cancel_wait) {
if (wait_addr != 0) { if (wait_addr != 0) {
@ -1182,11 +1182,11 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
/* Wait */ /* Wait */
struct snc_lock wake_lock = snc_lock_s(&pool->workers_wake_mutex); struct snc_lock wake_lock = snc_lock_s(&pool->workers_wake_mutex);
{ {
shutdown = atomic_i32_fetch(&pool->workers_shutdown.v); shutdown = atomic32_fetch(&pool->workers_shutdown.v);
while (atomic_i64_fetch(&pool->num_jobs_in_queue.v) <= 0 && !shutdown) { while (atomic64_fetch(&pool->num_jobs_in_queue.v) <= 0 && !shutdown) {
//__profnc("Wait for job", RGB32_F(0.75, 0.75, 0)); //__profnc("Wait for job", RGB32_F(0.75, 0.75, 0));
snc_cv_wait(&pool->workers_wake_cv, &wake_lock); snc_cv_wait(&pool->workers_wake_cv, &wake_lock);
shutdown = atomic_i32_fetch(&pool->workers_shutdown.v); shutdown = atomic32_fetch(&pool->workers_shutdown.v);
} }
} }
snc_unlock(&wake_lock); snc_unlock(&wake_lock);
@ -1224,7 +1224,7 @@ INTERNAL SYS_THREAD_DEF(job_scheduler_entry, _)
} }
i64 last_cycle_ns = 0; i64 last_cycle_ns = 0;
while (!atomic_i32_fetch(&G.shutdown)) { while (!atomic32_fetch(&G.shutdown)) {
__profn("Job scheduler cycle"); __profn("Job scheduler cycle");
{ {
__profn("Job scheduler wait"); __profn("Job scheduler wait");
@ -1254,12 +1254,12 @@ INTERNAL SYS_THREAD_DEF(job_scheduler_entry, _)
periods_sum_ns += (f64)periods[i]; periods_sum_ns += (f64)periods[i];
} }
f64 mean_ns = periods_sum_ns / (f64)countof(periods); f64 mean_ns = periods_sum_ns / (f64)countof(periods);
atomic_i64_fetch_set(&G.current_scheduler_cycle_period_ns.v, math_round_to_int64(mean_ns)); atomic64_fetch_set(&G.current_scheduler_cycle_period_ns.v, math_round_to_int64(mean_ns));
} }
{ {
__profn("Job scheduler run"); __profn("Job scheduler run");
i64 current_cycle = atomic_i64_fetch_add(&G.current_scheduler_cycle.v, 1) + 1; i64 current_cycle = atomic64_fetch_add(&G.current_scheduler_cycle.v, 1) + 1;
wake_time((u64)current_cycle); wake_time((u64)current_cycle);
} }
} }
@ -1275,7 +1275,7 @@ INTERNAL SYS_THREAD_DEF(test_entry, _)
(UNUSED)_; (UNUSED)_;
/* Start scheduler */ /* Start scheduler */
atomic_i64_fetch_set(&G.current_scheduler_cycle_period_ns.v, DEFAULT_SCHEDULER_CYCLE_PERIOD_NS); atomic64_fetch_set(&G.current_scheduler_cycle_period_ns.v, DEFAULT_SCHEDULER_CYCLE_PERIOD_NS);
struct sys_thread *scheduler_thread = sys_thread_alloc(job_scheduler_entry, 0, LIT("Scheduler thread"), PROF_THREAD_GROUP_SCHEDULER); struct sys_thread *scheduler_thread = sys_thread_alloc(job_scheduler_entry, 0, LIT("Scheduler thread"), PROF_THREAD_GROUP_SCHEDULER);
/* Start workers */ /* Start workers */
@ -2185,14 +2185,14 @@ INTERNAL SYS_THREAD_DEF(window_event_thread_entry_point, arg)
snc_counter_add(&window->ready_fence, -1); snc_counter_add(&window->ready_fence, -1);
while (!atomic_i32_fetch(&window->shutdown)) { while (!atomic32_fetch(&window->shutdown)) {
MSG msg = ZI; MSG msg = ZI;
{ {
GetMessageW(&msg, 0, 0, 0); GetMessageW(&msg, 0, 0, 0);
} }
{ {
__profn("Process window message"); __profn("Process window message");
if (atomic_i32_fetch(&window->shutdown)) { if (atomic32_fetch(&window->shutdown)) {
break; break;
} }
@ -2266,7 +2266,7 @@ INTERNAL SYS_THREAD_DEF(window_present_thread_entry_point, arg)
/* Show window */ /* Show window */
sys_window_show((struct sys_window *)window); sys_window_show((struct sys_window *)window);
while (!atomic_i32_fetch(&window->shutdown)) { while (!atomic32_fetch(&window->shutdown)) {
{ {
__profn("Swapchain wait"); __profn("Swapchain wait");
gp_swapchain_wait(window->swapchain); gp_swapchain_wait(window->swapchain);
@ -2337,7 +2337,7 @@ INTERNAL struct win32_window *win32_window_alloc(sys_job_func *present_job)
INTERNAL void win32_window_release(struct win32_window *window) INTERNAL void win32_window_release(struct win32_window *window)
{ {
/* Stop window threads */ /* Stop window threads */
atomic_i32_fetch_set(&window->shutdown, 1); atomic32_fetch_set(&window->shutdown, 1);
win32_window_wake(window); win32_window_wake(window);
sys_thread_wait_release(window->present_thread); sys_thread_wait_release(window->present_thread);
sys_thread_wait_release(window->event_thread); sys_thread_wait_release(window->event_thread);
@ -3105,7 +3105,7 @@ void sys_exit(void)
void sys_panic(struct string msg) void sys_panic(struct string msg)
{ {
if (atomic_i32_fetch_test_set(&G.panicking, 0, 1) == 0) { if (atomic32_fetch_test_set(&G.panicking, 0, 1) == 0) {
log_panic(msg); log_panic(msg);
wchar_t *wstr = G.panic_wstr; wchar_t *wstr = G.panic_wstr;
@ -3352,7 +3352,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Run exit callbacks */ /* Run exit callbacks */
{ {
__profn("Run exit callbacks"); __profn("Run exit callbacks");
i32 num_funcs = atomic_i32_fetch(&G.num_exit_funcs); i32 num_funcs = atomic32_fetch(&G.num_exit_funcs);
for (i32 i = num_funcs - 1; i >= 0; --i) { for (i32 i = num_funcs - 1; i >= 0; --i) {
sys_exit_func *func = G.exit_funcs[i]; sys_exit_func *func = G.exit_funcs[i];
func(); func();
@ -3360,7 +3360,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
} }
/* Signal shutdown */ /* Signal shutdown */
atomic_i32_fetch_set(&G.shutdown, 1); atomic32_fetch_set(&G.shutdown, 1);
/* Shutdown test thread */ /* Shutdown test thread */
{ {
@ -3368,7 +3368,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
struct job_pool *pool = &G.job_pools[pool_kind]; struct job_pool *pool = &G.job_pools[pool_kind];
struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex); struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
{ {
atomic_i32_fetch_set(&pool->workers_shutdown.v, 1); atomic32_fetch_set(&pool->workers_shutdown.v, 1);
snc_cv_signal(&pool->workers_wake_cv, I32_MAX); snc_cv_signal(&pool->workers_wake_cv, I32_MAX);
} }
snc_unlock(&lock); snc_unlock(&lock);
@ -3377,7 +3377,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
sys_thread_wait_release(test_thread); sys_thread_wait_release(test_thread);
/* Find any dangling threads that haven't exited gracefully by now */ /* Find any dangling threads that haven't exited gracefully by now */
if (!atomic_i32_fetch(&G.panicking)) { if (!atomic32_fetch(&G.panicking)) {
struct snc_lock lock = snc_lock_s(&G.threads_mutex); struct snc_lock lock = snc_lock_s(&G.threads_mutex);
if (G.threads_first) { if (G.threads_first) {
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
@ -3397,7 +3397,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
} }
/* Check if panicking */ /* Check if panicking */
if (atomic_i32_fetch(&G.panicking)) { if (atomic32_fetch(&G.panicking)) {
/* Wait for panic message to be ready */ /* Wait for panic message to be ready */
WaitForSingleObject(G.panic_event, INFINITE); WaitForSingleObject(G.panic_event, INFINITE);
/* Set error and abort */ /* Set error and abort */

View File

@ -48,7 +48,7 @@ struct console_log {
}; };
GLOBAL struct { GLOBAL struct {
struct atomic_i32 shutdown; struct atomic32 shutdown;
struct snc_counter shutdown_job_counters; struct snc_counter shutdown_job_counters;
struct sys_window *window; struct sys_window *window;
@ -103,8 +103,8 @@ GLOBAL struct {
u64 last_user_sim_cmd_gen; u64 last_user_sim_cmd_gen;
u64 user_sim_cmd_gen; u64 user_sim_cmd_gen;
struct atomic_i32 user_paused; struct atomic32 user_paused;
struct atomic_i32 user_paused_steps; struct atomic32 user_paused_steps;
/* Local sim -> user */ /* Local sim -> user */
struct snc_mutex local_to_user_client_mutex; struct snc_mutex local_to_user_client_mutex;
@ -256,7 +256,7 @@ INTERNAL SYS_EXIT_FUNC(user_shutdown)
__prof; __prof;
sys_window_release(G.window); sys_window_release(G.window);
/* Signal shutdown */ /* Signal shutdown */
atomic_i32_fetch_set(&G.shutdown, 1); atomic32_fetch_set(&G.shutdown, 1);
/* Wait for jobs shutdown */ /* Wait for jobs shutdown */
snc_counter_wait(&G.shutdown_job_counters); snc_counter_wait(&G.shutdown_job_counters);
} }
@ -623,13 +623,13 @@ SYS_JOB_DEF(user_update_job, job)
struct sim_snapshot *newest_snapshot = sim_snapshot_from_tick(G.user_unblended_client, G.user_unblended_client->last_tick); struct sim_snapshot *newest_snapshot = sim_snapshot_from_tick(G.user_unblended_client, G.user_unblended_client->last_tick);
G.local_sim_last_known_time_ns = newest_snapshot->sim_time_ns; G.local_sim_last_known_time_ns = newest_snapshot->sim_time_ns;
G.local_sim_last_known_tick = newest_snapshot->tick; G.local_sim_last_known_tick = newest_snapshot->tick;
if (atomic_i32_fetch(&G.user_paused)) { if (atomic32_fetch(&G.user_paused)) {
G.local_sim_predicted_time_ns = G.local_sim_last_known_tick; G.local_sim_predicted_time_ns = G.local_sim_last_known_tick;
} else { } else {
G.local_sim_predicted_time_ns = newest_snapshot->sim_time_ns + (newest_snapshot->sim_dt_ns * tick_progress); G.local_sim_predicted_time_ns = newest_snapshot->sim_time_ns + (newest_snapshot->sim_dt_ns * tick_progress);
} }
if (USER_INTERP_ENABLED && !atomic_i32_fetch(&G.user_paused)) { if (USER_INTERP_ENABLED && !atomic32_fetch(&G.user_paused)) {
/* Determine render time */ /* Determine render time */
G.render_time_target_ns = G.local_sim_predicted_time_ns - (USER_INTERP_RATIO * G.average_local_to_user_snapshot_publish_dt_ns); G.render_time_target_ns = G.local_sim_predicted_time_ns - (USER_INTERP_RATIO * G.average_local_to_user_snapshot_publish_dt_ns);
if (G.average_local_to_user_snapshot_publish_dt_ns > 0) { if (G.average_local_to_user_snapshot_publish_dt_ns > 0) {
@ -1809,9 +1809,9 @@ SYS_JOB_DEF(user_update_job, job)
} }
if (pause_state.num_presses) { if (pause_state.num_presses) {
atomic_i32_fetch_xor(&G.user_paused, 1); atomic32_fetch_xor(&G.user_paused, 1);
} }
atomic_i32_fetch_add(&G.user_paused_steps, step_state.num_presses_and_repeats); atomic32_fetch_add(&G.user_paused_steps, step_state.num_presses_and_repeats);
/* Set user sim control */ /* Set user sim control */
{ {
@ -2214,7 +2214,7 @@ INTERNAL SYS_JOB_DEF(local_sim_job, _)
i64 real_dt_ns = 0; i64 real_dt_ns = 0;
i64 step_dt_ns = NS_FROM_SECONDS(1) / SIM_TICKS_PER_SECOND; i64 step_dt_ns = NS_FROM_SECONDS(1) / SIM_TICKS_PER_SECOND;
f64 compute_timescale = 1.0; f64 compute_timescale = 1.0;
while (!atomic_i32_fetch(&G.shutdown)) { while (!atomic32_fetch(&G.shutdown)) {
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
{ {
__profn("Sim sleep"); __profn("Sim sleep");
@ -2385,10 +2385,10 @@ INTERNAL SYS_JOB_DEF(local_sim_job, _)
} }
} }
b32 should_step = !atomic_i32_fetch(&G.user_paused); b32 should_step = !atomic32_fetch(&G.user_paused);
if (atomic_i32_fetch(&G.user_paused_steps) > 0) { if (atomic32_fetch(&G.user_paused_steps) > 0) {
should_step = 1; should_step = 1;
atomic_i32_fetch_add(&G.user_paused_steps, -1); atomic32_fetch_add(&G.user_paused_steps, -1);
} }
if (!should_step) { if (!should_step) {