From b017a9a477288a294f54e17b5383be2677990c87 Mon Sep 17 00:00:00 2001 From: jacob Date: Wed, 2 Jul 2025 13:26:34 -0500 Subject: [PATCH] rename atomic eval -> fetch --- src/atomic.h | 50 +++++++++++++++++++------------------- src/gp_dx11.c | 6 ++--- src/gp_dx12.c | 8 +++---- src/gstat.h | 8 +++---- src/host.c | 6 ++--- src/incbin.c | 8 +++---- src/job.c | 38 ++++++++++++++--------------- src/log.c | 14 +++++------ src/playback_wasapi.c | 4 ++-- src/resource.c | 12 +++++----- src/scratch.h | 6 ++--- src/sprite.c | 56 +++++++++++++++++++++---------------------- src/sys_win32-old.c | 38 ++++++++++++++--------------- src/sys_win32.c | 42 ++++++++++++++++---------------- src/thread_local.c | 12 +++++----- src/thread_local.h | 6 ++--- src/user.c | 20 ++++++++-------- 17 files changed, 167 insertions(+), 167 deletions(-) diff --git a/src/atomic.h b/src/atomic.h index da229f82..c5f41e82 100644 --- a/src/atomic.h +++ b/src/atomic.h @@ -5,35 +5,35 @@ /* TODO: Remove "..._raw" functions */ -FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } -FORCE_INLINE i32 atomic_i32_eval_exchange(struct atomic_i32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); } -FORCE_INLINE i32 atomic_i32_eval_compare_exchange(struct atomic_i32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); } -FORCE_INLINE i32 atomic_i32_eval_xor(struct atomic_i32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v,c); } -FORCE_INLINE i32 atomic_i32_eval_add(struct atomic_i32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); } +FORCE_INLINE i32 atomic_i32_fetch(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } +FORCE_INLINE i32 atomic_i32_fetch_set(struct atomic_i32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); } +FORCE_INLINE i32 
atomic_i32_fetch_test_set(struct atomic_i32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); } +FORCE_INLINE i32 atomic_i32_fetch_xor(struct atomic_i32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v,c); } +FORCE_INLINE i32 atomic_i32_fetch_add(struct atomic_i32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); } -FORCE_INLINE i64 atomic_i64_eval(struct atomic_i64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); } -FORCE_INLINE i64 atomic_i64_eval_exchange(struct atomic_i64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); } -FORCE_INLINE i64 atomic_i64_eval_compare_exchange(struct atomic_i64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); } -FORCE_INLINE i64 atomic_i64_eval_xor(struct atomic_i64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); } -FORCE_INLINE i64 atomic_i64_eval_add(struct atomic_i64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); } +FORCE_INLINE i64 atomic_i64_fetch(struct atomic_i64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); } +FORCE_INLINE i64 atomic_i64_fetch_set(struct atomic_i64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); } +FORCE_INLINE i64 atomic_i64_fetch_test_set(struct atomic_i64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); } +FORCE_INLINE i64 atomic_i64_fetch_xor(struct atomic_i64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); } +FORCE_INLINE i64 atomic_i64_fetch_add(struct atomic_i64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); } -FORCE_INLINE u32 atomic_u32_eval(struct atomic_u32 *x) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } -FORCE_INLINE u32 atomic_u32_eval_exchange(struct atomic_u32 *x, u32 e) { return (u32)_InterlockedExchange((volatile long *)&x->_v, (long)e); } -FORCE_INLINE u32 atomic_u32_eval_compare_exchange(struct atomic_u32 *x, u32 c, u32 
e) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, (long)e, (long)c); } -FORCE_INLINE u32 atomic_u32_eval_xor(struct atomic_u32 *x, u32 c) { return (u32)_InterlockedXor((volatile long *)&x->_v, (long)c); } -FORCE_INLINE u32 atomic_u32_eval_add_u32(struct atomic_u32 *x, u32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); } -FORCE_INLINE u32 atomic_u32_eval_add_i32(struct atomic_u32 *x, i32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); } +FORCE_INLINE u32 atomic_u32_fetch(struct atomic_u32 *x) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } +FORCE_INLINE u32 atomic_u32_fetch_set(struct atomic_u32 *x, u32 e) { return (u32)_InterlockedExchange((volatile long *)&x->_v, (long)e); } +FORCE_INLINE u32 atomic_u32_fetch_test_set(struct atomic_u32 *x, u32 c, u32 e) { return (u32)_InterlockedCompareExchange((volatile long *)&x->_v, (long)e, (long)c); } +FORCE_INLINE u32 atomic_u32_fetch_xor(struct atomic_u32 *x, u32 c) { return (u32)_InterlockedXor((volatile long *)&x->_v, (long)c); } +FORCE_INLINE u32 atomic_u32_fetch_add_u32(struct atomic_u32 *x, u32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); } +FORCE_INLINE u32 atomic_u32_fetch_add_i32(struct atomic_u32 *x, i32 a) { return (u32)_InterlockedExchangeAdd((volatile long *)&x->_v, (long)a); } -FORCE_INLINE u64 atomic_u64_eval(struct atomic_u64 *x) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); } -FORCE_INLINE u64 atomic_u64_eval_exchange(struct atomic_u64 *x, u64 e) { return (u64)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); } -FORCE_INLINE u64 atomic_u64_eval_compare_exchange(struct atomic_u64 *x, u64 c, u64 e) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); } -FORCE_INLINE u32 atomic_u64_eval_xor(struct atomic_u64 *x, u64 c) { return (u64)_InterlockedXor64((volatile i64 *)&x->_v, (i64)c); } -FORCE_INLINE u64 
atomic_u64_eval_add_u64(struct atomic_u64 *x, u64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
-FORCE_INLINE u64 atomic_u64_eval_add_i64(struct atomic_u64 *x, i64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
+FORCE_INLINE u64 atomic_u64_fetch(struct atomic_u64 *x) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); }
+FORCE_INLINE u64 atomic_u64_fetch_set(struct atomic_u64 *x, u64 e) { return (u64)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); }
+FORCE_INLINE u64 atomic_u64_fetch_test_set(struct atomic_u64 *x, u64 c, u64 e) { return (u64)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); }
+FORCE_INLINE u64 atomic_u64_fetch_xor(struct atomic_u64 *x, u64 c) { return (u64)_InterlockedXor64((volatile i64 *)&x->_v, (i64)c); }
+FORCE_INLINE u64 atomic_u64_fetch_add_u64(struct atomic_u64 *x, u64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
+FORCE_INLINE u64 atomic_u64_fetch_add_i64(struct atomic_u64 *x, i64 a) { return (u64)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, (i64)a); }
-FORCE_INLINE void *atomic_ptr_eval(struct atomic_ptr *x) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); }
-FORCE_INLINE void *atomic_ptr_eval_exchange(struct atomic_ptr *x, void *e) { return (void *)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); }
-FORCE_INLINE void *atomic_ptr_eval_compare_exchange(struct atomic_ptr *x, void *c, void *e) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); }
+FORCE_INLINE void *atomic_ptr_fetch(struct atomic_ptr *x) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, 0, 0); }
+FORCE_INLINE void *atomic_ptr_fetch_set(struct atomic_ptr *x, void *e) { return (void *)_InterlockedExchange64((volatile i64 *)&x->_v, (i64)e); }
+FORCE_INLINE void *atomic_ptr_fetch_test_set(struct atomic_ptr *x, void *c, void *e) { return 
(void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); } #else # error "Atomics not implemented" diff --git a/src/gp_dx11.c b/src/gp_dx11.c index b10760fd..4168a4ef 100644 --- a/src/gp_dx11.c +++ b/src/gp_dx11.c @@ -671,7 +671,7 @@ INTERNAL b32 shader_set_dirty(struct string name) struct dx11_shader_desc *desc = &G.shader_info[i]; struct string desc_name = string_from_cstr_no_limit(desc->name_cstr); if (string_eq(desc_name, name)) { - atomic_i32_eval_exchange(&desc->is_dirty, 1); + atomic_i32_fetch_set(&desc->is_dirty, 1); caused_dirty = true; } else { struct dict *includes_dict = desc->includes_dict; @@ -679,7 +679,7 @@ INTERNAL b32 shader_set_dirty(struct string name) struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex); { if (dict_get(includes_dict, hash) != 0) { - atomic_i32_eval_exchange(&desc->is_dirty, 1); + atomic_i32_fetch_set(&desc->is_dirty, 1); caused_dirty = true; } } @@ -691,7 +691,7 @@ INTERNAL b32 shader_set_dirty(struct string name) INTERNAL b32 shader_unset_dirty(struct dx11_shader_desc *desc) { - return atomic_i32_eval_compare_exchange(&desc->is_dirty, 1, 0) == 1; + return atomic_i32_fetch_test_set(&desc->is_dirty, 1, 0) == 1; } #endif diff --git a/src/gp_dx12.c b/src/gp_dx12.c index 273c10fd..c1e143f8 100644 --- a/src/gp_dx12.c +++ b/src/gp_dx12.c @@ -411,7 +411,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(gp_shutdown) (UNUSED)command_queue_release; #endif - atomic_i32_eval_exchange(&G.evictor_thread_shutdown, 1); + atomic_i32_fetch_set(&G.evictor_thread_shutdown, 1); SetEvent(G.evictor_thread_wake_event); sys_thread_wait_release(G.evictor_thread); } @@ -2843,7 +2843,7 @@ INTERNAL SYS_THREAD_DEF(evictor_thread_entry_point, arg) u64 completed_targets[DX12_NUM_QUEUES] = ZI; - b32 shutdown = atomic_i32_eval(&G.evictor_thread_shutdown); + b32 shutdown = atomic_i32_fetch(&G.evictor_thread_shutdown); while (!shutdown) { struct arena_temp temp = arena_temp_begin(scratch.arena); { @@ -2877,7 +2877,7 @@ INTERNAL 
SYS_THREAD_DEF(evictor_thread_entry_point, arg) { __profscope(Wait on fence); WaitForMultipleObjects(2, events, false, INFINITE); - shutdown = atomic_i32_eval(&G.evictor_thread_shutdown); + shutdown = atomic_i32_fetch(&G.evictor_thread_shutdown); } } } @@ -2913,7 +2913,7 @@ INTERNAL SYS_THREAD_DEF(evictor_thread_entry_point, arg) { __profscope(Sleep); WaitForSingleObject(G.evictor_thread_wake_event, INFINITE); - shutdown = atomic_i32_eval(&G.evictor_thread_shutdown); + shutdown = atomic_i32_fetch(&G.evictor_thread_shutdown); } } diff --git a/src/gstat.h b/src/gstat.h index ca2ee7c6..53e1b352 100644 --- a/src/gstat.h +++ b/src/gstat.h @@ -17,10 +17,10 @@ struct _gstats { extern struct _gstats _g_gstats; -#define gstat_set(name, v) atomic_u64_eval_exchange(&_g_gstats.name, (v)) -#define gstat_add(name, v) atomic_u64_eval_add_u64(&_g_gstats.name, (v)) -#define gstat_sub(name, v) atomic_u64_eval_add_i64(&_g_gstats.name, -((i64)(v))) -#define gstat_get(name) atomic_u64_eval(&_g_gstats.name) +#define gstat_set(name, v) atomic_u64_fetch_set(&_g_gstats.name, (v)) +#define gstat_add(name, v) atomic_u64_fetch_add_u64(&_g_gstats.name, (v)) +#define gstat_sub(name, v) atomic_u64_fetch_add_i64(&_g_gstats.name, -((i64)(v))) +#define gstat_get(name) atomic_u64_fetch(&_g_gstats.name) #else diff --git a/src/host.c b/src/host.c index ac24bfef..d4d09bfc 100644 --- a/src/host.c +++ b/src/host.c @@ -207,7 +207,7 @@ struct host *host_alloc(u16 listen_port) void host_release(struct host *host) { - atomic_i32_eval_exchange(&host->receiver_thread_shutdown_flag, 1); + atomic_i32_fetch_set(&host->receiver_thread_shutdown_flag, 1); sock_wake(host->sock); while (!sys_thread_try_release(host->receiver_thread, 0.001f)) { sock_wake(host->sock); @@ -1076,10 +1076,10 @@ INTERNAL SYS_THREAD_DEF(host_receiver_thread_entry_point, arg) socks.count = 1; struct atomic_i32 *shutdown = &host->receiver_thread_shutdown_flag; - while (!atomic_i32_eval(shutdown)) { + while (!atomic_i32_fetch(shutdown)) { 
struct sock *sock = sock_wait_for_available_read(socks, F32_INFINITY); struct sock_read_result res; - while (!atomic_i32_eval(shutdown) && sock && (res = sock_read(sock, read_buff)).valid) { + while (!atomic_i32_fetch(shutdown) && sock && (res = sock_read(sock, read_buff)).valid) { struct sock_address address = res.address; struct string data = res.data; if (data.len > 0) { diff --git a/src/incbin.c b/src/incbin.c index 75f140bc..c96dd27d 100644 --- a/src/incbin.c +++ b/src/incbin.c @@ -47,12 +47,12 @@ INTERNAL BOOL CALLBACK enum_func(HMODULE module, LPCWSTR type, LPCWSTR wstr_entr struct string _incbin_get(struct _incbin_rc_resource *inc) { - enum _incbin_state state = atomic_i32_eval(&inc->state); + enum _incbin_state state = atomic_i32_fetch(&inc->state); if (state != INCBIN_STATE_SEARCHED) { struct arena_temp scratch = scratch_begin_no_conflict(); if (state == INCBIN_STATE_UNSEARCHED) { - enum _incbin_state v = atomic_i32_eval_compare_exchange(&inc->state, state, INCBIN_STATE_SEARCHING); + enum _incbin_state v = atomic_i32_fetch_test_set(&inc->state, state, INCBIN_STATE_SEARCHING); if (v == state) { /* Search RC file for the resource name */ struct string name_lower = string_lower(scratch.arena, inc->rc_name); @@ -65,7 +65,7 @@ struct string _incbin_get(struct _incbin_rc_resource *inc) } inc->data = params.data; state = INCBIN_STATE_SEARCHED; - atomic_i32_eval_exchange(&inc->state, state); + atomic_i32_fetch_set(&inc->state, state); } else { state = v; } @@ -74,7 +74,7 @@ struct string _incbin_get(struct _incbin_rc_resource *inc) /* Spin while another thread searches */ while (state != INCBIN_STATE_SEARCHED) { ix_pause(); - state = atomic_i32_eval(&inc->state); + state = atomic_i32_fetch(&inc->state); } scratch_end(scratch); diff --git a/src/job.c b/src/job.c index 05dd648c..35d318c0 100644 --- a/src/job.c +++ b/src/job.c @@ -92,7 +92,7 @@ void job_startup(i32 num_workers, struct string *worker_names) struct string name = worker_names[i]; G.worker_threads[i] = 
sys_thread_alloc(worker_thread_entry_point, (void *)(i64)i, name); } - atomic_i32_eval_exchange(&G.num_idle_worker_threads, num_workers); + atomic_i32_fetch_set(&G.num_idle_worker_threads, num_workers); app_register_exit_callback(job_shutdown); @@ -121,14 +121,14 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(job_shutdown) INTERNAL void atomic_lock(void) { - while (atomic_i32_eval_compare_exchange(&G.atomic_lock, 0, 1) != 0) { + while (atomic_i32_fetch_test_set(&G.atomic_lock, 0, 1) != 0) { ix_pause(); } } INTERNAL void atomic_unlock(void) { - atomic_i32_eval_exchange(&G.atomic_lock, 0); + atomic_i32_fetch_set(&G.atomic_lock, 0); } /* ========================== * @@ -162,7 +162,7 @@ struct job_desc { INTERNAL struct job_handle job_dispatch_ex(struct job_desc desc) { - struct worker_ctx *ctx = thread_local_var_eval(&tl_worker_ctx); + struct worker_ctx *ctx = thread_local_var_fetch(&tl_worker_ctx); i32 worker_id = ctx->worker_id; job_func *job_func = desc.func; @@ -209,14 +209,14 @@ INTERNAL struct job_handle job_dispatch_ex(struct job_desc desc) G.first_free_job = job->next_free; old_cv_mutex = job->gen_cv_mutex; old_cv = job->gen_cv; - gen = atomic_u64_eval(&job->gen) + 1; + gen = atomic_u64_fetch(&job->gen) + 1; } else { job = arena_push_no_zero(G.arena, struct worker_job); gen = 1; } atomic_unlock(); } - atomic_u64_eval_exchange(&job->gen, 0); + atomic_u64_fetch_set(&job->gen, 0); MEMZERO_STRUCT(job); if (old_cv_mutex) { job->gen_cv_mutex = old_cv_mutex; @@ -229,7 +229,7 @@ INTERNAL struct job_handle job_dispatch_ex(struct job_desc desc) job->count = job_count; job->func = job_func; job->sig = sig; - atomic_u64_eval_exchange(&job->gen, gen); + atomic_u64_fetch_set(&job->gen, gen); { /* Signal mutex change */ if (old_cv_mutex) { @@ -334,7 +334,7 @@ INTERNAL struct job_handle job_dispatch_ex(struct job_desc desc) G.first_free_job = job; } /* Signal waiters */ - atomic_u64_eval_add_u64(&job->gen, 1); + atomic_u64_fetch_add_u64(&job->gen, 1); { struct sys_lock cv_lock = 
sys_mutex_lock_e(job->gen_cv_mutex); sys_condition_variable_broadcast(job->gen_cv); @@ -353,10 +353,10 @@ INTERNAL struct job_handle job_dispatch_ex(struct job_desc desc) if (wait && !is_done) { __profscope(Wait for job); struct sys_lock cv_lock = sys_mutex_lock_s(job->gen_cv_mutex); - is_done = atomic_u64_eval(&job->gen) != handle.gen; + is_done = atomic_u64_fetch(&job->gen) != handle.gen; while (!is_done) { sys_condition_variable_wait(job->gen_cv, &cv_lock); - is_done = atomic_u64_eval(&job->gen) != handle.gen; + is_done = atomic_u64_fetch(&job->gen) != handle.gen; } sys_mutex_unlock(&cv_lock); } @@ -404,9 +404,9 @@ void job_wait(struct job_handle handle) { struct worker_job *job = handle.job; if (job && handle.gen) { - b32 is_done = atomic_u64_eval(&job->gen) != handle.gen; + b32 is_done = atomic_u64_fetch(&job->gen) != handle.gen; if (!is_done) { - struct worker_ctx *ctx = thread_local_var_eval(&tl_worker_ctx); + struct worker_ctx *ctx = thread_local_var_fetch(&tl_worker_ctx); i32 worker_id = ctx->worker_id; i32 job_pinned_worker = job->pinned_worker_id; if (worker_id >= 0 && (job_pinned_worker < 0 || job_pinned_worker == worker_id)) { @@ -465,7 +465,7 @@ void job_wait(struct job_handle handle) G.first_free_job = job; } /* Signal waiters */ - atomic_u64_eval_add_u64(&job->gen, 1); + atomic_u64_fetch_add_u64(&job->gen, 1); { struct sys_lock cv_lock = sys_mutex_lock_e(job->gen_cv_mutex); sys_condition_variable_broadcast(job->gen_cv); @@ -483,10 +483,10 @@ void job_wait(struct job_handle handle) if (!is_done) { __profscope(Wait for job); struct sys_lock cv_lock = sys_mutex_lock_s(job->gen_cv_mutex); - is_done = atomic_u64_eval(&job->gen) != handle.gen; + is_done = atomic_u64_fetch(&job->gen) != handle.gen; while (!is_done) { sys_condition_variable_wait(job->gen_cv, &cv_lock); - is_done = atomic_u64_eval(&job->gen) != handle.gen; + is_done = atomic_u64_fetch(&job->gen) != handle.gen; } sys_mutex_unlock(&cv_lock); } @@ -502,7 +502,7 @@ INTERNAL 
SYS_THREAD_DEF(worker_thread_entry_point, thread_arg) { i32 worker_id = (i32)(i64)thread_arg; - struct worker_ctx *ctx = thread_local_var_eval(&tl_worker_ctx); + struct worker_ctx *ctx = thread_local_var_fetch(&tl_worker_ctx); ctx->worker_id = worker_id; struct worker_job_queue *queues[] = { &G.pinned_queues[worker_id], &G.global_queue }; @@ -550,7 +550,7 @@ INTERNAL SYS_THREAD_DEF(worker_thread_entry_point, thread_arg) if (job) { __profscope(Execute job); ctx->pin_depth += job_is_pinned_to_worker; - atomic_i32_eval_add(&G.num_idle_worker_threads, -1); + atomic_i32_fetch_add(&G.num_idle_worker_threads, -1); struct job_data data = ZI; data.sig = job->sig; job_func *func = job->func; @@ -591,7 +591,7 @@ INTERNAL SYS_THREAD_DEF(worker_thread_entry_point, thread_arg) G.first_free_job = job; } /* Signal waiters */ - atomic_u64_eval_add_u64(&job->gen, 1); + atomic_u64_fetch_add_u64(&job->gen, 1); { struct sys_lock cv_lock = sys_mutex_lock_e(job->gen_cv_mutex); sys_condition_variable_broadcast(job->gen_cv); @@ -602,7 +602,7 @@ INTERNAL SYS_THREAD_DEF(worker_thread_entry_point, thread_arg) atomic_unlock(); } } - atomic_i32_eval_add(&G.num_idle_worker_threads, 1); + atomic_i32_fetch_add(&G.num_idle_worker_threads, 1); ctx->pin_depth -= job_is_pinned_to_worker; } diff --git a/src/log.c b/src/log.c index cdbdfee7..a0d935bb 100644 --- a/src/log.c +++ b/src/log.c @@ -74,7 +74,7 @@ void log_startup(struct string logfile_path) G.file_valid = true; } } - atomic_i32_eval_exchange(&G.initialized, 1); + atomic_i32_fetch_set(&G.initialized, 1); } /* ========================== * @@ -83,7 +83,7 @@ void log_startup(struct string logfile_path) void log_register_callback(log_event_callback_func *func, i32 level) { - if (!atomic_i32_eval(&G.initialized)) { return; } + if (!atomic_i32_fetch(&G.initialized)) { return; } struct sys_lock lock = sys_mutex_lock_e(G.callbacks_mutex); { struct log_event_callback *callback = arena_push(G.callbacks_arena, struct log_event_callback); @@ -106,7 +106,7 
@@ void log_register_callback(log_event_callback_func *func, i32 level) INTERNAL void append_to_logfile(struct string msg) { __prof; - if (!atomic_i32_eval(&G.initialized)) { return; } + if (!atomic_i32_fetch(&G.initialized)) { return; } if (G.file_valid) { struct arena_temp scratch = scratch_begin_no_conflict(); @@ -120,7 +120,7 @@ INTERNAL void append_to_logfile(struct string msg) * writing to log file. */ void _log_panic(struct string msg) { - if (!atomic_i32_eval(&G.initialized)) { return; } + if (!atomic_i32_fetch(&G.initialized)) { return; } if (G.file_valid) { sys_file_write(G.file, LIT("******** PANICKING ********\n")); @@ -136,7 +136,7 @@ void _log(i32 level, struct string msg) #endif { __prof; - if (!atomic_i32_eval(&G.initialized)) { return; } + if (!atomic_i32_fetch(&G.initialized)) { return; } if (level < 0 || level >= LOG_LEVEL_COUNT) { sys_panic(LIT("Invalid log level")); @@ -232,7 +232,7 @@ void _logfv(i32 level, struct string file, u32 line, struct string fmt, va_list void _logfv(i32 level, struct string fmt, va_list args) #endif { - if (!atomic_i32_eval(&G.initialized)) { return; } + if (!atomic_i32_fetch(&G.initialized)) { return; } struct arena_temp scratch = scratch_begin_no_conflict(); struct string msg = string_formatv(scratch.arena, fmt, args); #if LOG_INCLUDE_SOURCE_LOCATION @@ -249,7 +249,7 @@ void _logf(i32 level, struct string file, u32 line, struct string fmt, ...) void _logf(i32 level, struct string fmt, ...) 
#endif { - if (!atomic_i32_eval(&G.initialized)) { return; } + if (!atomic_i32_fetch(&G.initialized)) { return; } va_list args; va_start(args, fmt); #if LOG_INCLUDE_SOURCE_LOCATION diff --git a/src/playback_wasapi.c b/src/playback_wasapi.c index 6f3aa369..9cf12b34 100644 --- a/src/playback_wasapi.c +++ b/src/playback_wasapi.c @@ -68,7 +68,7 @@ struct playback_startup_receipt playback_startup(struct mixer_startup_receipt *m INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(playback_shutdown) { __prof; - atomic_i32_eval_exchange(&G.shutdown, true); + atomic_i32_fetch_set(&G.shutdown, true); } /* ========================== * @@ -240,7 +240,7 @@ INTERNAL JOB_DEF(playback_job, _) /* FIXME: If playback fails at any point and mixer stops advancing, we * need to halt mixer to prevent memory leak when sounds are played. */ - while (!atomic_i32_eval(&G.shutdown)) { + while (!atomic_i32_fetch(&G.shutdown)) { struct arena_temp temp = arena_temp_begin(scratch.arena); struct wasapi_buffer wspbuf = wasapi_update_begin(); struct mixed_pcm_f32 pcm = mixer_update(temp.arena, wspbuf.frames_count); diff --git a/src/resource.c b/src/resource.c index 16674b30..9e99cdb2 100644 --- a/src/resource.c +++ b/src/resource.c @@ -161,7 +161,7 @@ void resource_close(struct resource *res_ptr) INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(resource_shutdown) { __prof; - atomic_i32_eval_exchange(&G.watch_shutdown, 1); + atomic_i32_fetch_set(&G.watch_shutdown, 1); sys_condition_variable_broadcast(G.watch_dispatcher_cv); sys_watch_wake(G.watch); @@ -188,10 +188,10 @@ INTERNAL SYS_THREAD_DEF(resource_watch_monitor_thread_entry_point, _) (UNUSED)_; struct arena_temp scratch = scratch_begin_no_conflict(); - while (!atomic_i32_eval(&G.watch_shutdown)) { + while (!atomic_i32_fetch(&G.watch_shutdown)) { struct arena_temp temp = arena_temp_begin(scratch.arena); struct sys_watch_info_list res = sys_watch_wait(temp.arena, G.watch); - if (res.first && !atomic_i32_eval(&G.watch_shutdown)) { + if (res.first && 
!atomic_i32_fetch(&G.watch_shutdown)) { struct sys_lock lock = sys_mutex_lock_e(G.watch_dispatcher_mutex); { struct sys_watch_info_list list_part = sys_watch_info_copy(G.watch_dispatcher_info_arena, res); @@ -239,9 +239,9 @@ INTERNAL SYS_THREAD_DEF(resource_watch_dispatcher_thread_entry_point, _) struct arena_temp scratch = scratch_begin_no_conflict(); struct sys_lock watch_dispatcher_lock = sys_mutex_lock_e(G.watch_dispatcher_mutex); - while (!atomic_i32_eval(&G.watch_shutdown)) { + while (!atomic_i32_fetch(&G.watch_shutdown)) { sys_condition_variable_wait(G.watch_dispatcher_cv, &watch_dispatcher_lock); - if (!atomic_i32_eval(&G.watch_shutdown) && G.watch_dispatcher_info_arena->pos > 0) { + if (!atomic_i32_fetch(&G.watch_shutdown) && G.watch_dispatcher_info_arena->pos > 0) { __profscope(Dispatch resource watch callbacks); /* Unlock and sleep a bit so duplicate events pile up */ { @@ -250,7 +250,7 @@ INTERNAL SYS_THREAD_DEF(resource_watch_dispatcher_thread_entry_point, _) sys_sleep(WATCH_DISPATCHER_DELAY_SECONDS); watch_dispatcher_lock = sys_mutex_lock_e(G.watch_dispatcher_mutex); } - if (!atomic_i32_eval(&G.watch_shutdown)) { + if (!atomic_i32_fetch(&G.watch_shutdown)) { struct arena_temp temp = arena_temp_begin(scratch.arena); /* Pull watch info from queue */ diff --git a/src/scratch.h b/src/scratch.h index 43538f38..b8a00dae 100644 --- a/src/scratch.h +++ b/src/scratch.h @@ -60,7 +60,7 @@ INLINE struct arena_temp _scratch_begin(struct arena *potential_conflict) /* Use `scratch_begin_no_conflict` if no conflicts are present */ ASSERT(potential_conflict != NULL); - struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_eval(&tl_scratch_ctx); + struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_fetch(&tl_scratch_ctx); struct arena *scratch_arena = ctx->arenas[0]; if (potential_conflict && scratch_arena == potential_conflict) { scratch_arena = ctx->arenas[1]; @@ -84,7 +84,7 @@ INLINE struct arena_temp _scratch_begin(struct arena 
*potential_conflict) INLINE struct arena_temp _scratch_begin_no_conflict(void) { - struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_eval(&tl_scratch_ctx); + struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_fetch(&tl_scratch_ctx); struct arena *scratch_arena = ctx->arenas[0]; struct arena_temp temp = arena_temp_begin(scratch_arena); scratch_dbg_push(ctx, &temp); @@ -98,7 +98,7 @@ INLINE struct arena_temp _scratch_begin_no_conflict(void) INLINE void scratch_end(struct arena_temp scratch_temp) { #if RTC - struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_eval(&tl_scratch_ctx); + struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_fetch(&tl_scratch_ctx); if (ctx->scratch_id_stack_count > 0) { u64 scratch_id = scratch_temp.scratch_id; u64 expected_id = ctx->scratch_id_stack[--ctx->scratch_id_stack_count]; diff --git a/src/sprite.c b/src/sprite.c index 3cb005af..b05b95fd 100644 --- a/src/sprite.c +++ b/src/sprite.c @@ -68,7 +68,7 @@ struct cache_entry { enum cache_entry_kind kind; struct cache_entry_hash hash; struct atomic_i32 state; - struct atomic_u64 refcount_struct; /* Cast eval to `cache_refcount` */ + struct atomic_u64 refcount_struct; /* Cast fetched result to `cache_refcount` */ /* Allocated data */ /* NOTE: This data is finalized once entry state = loaded */ @@ -346,7 +346,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t struct arena_temp scratch = scratch_begin_no_conflict(); struct cache_entry *e = ref.e; - atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_WORKING); + atomic_i32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING); struct string path = tag.path; logf_info("Loading sprite texture [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path)); @@ -388,7 +388,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t } arena_set_readonly(e->arena); e->memory_usage = e->arena->committed + memory_size; - 
atomic_u64_eval_add_u64(&G.cache.memory_usage, e->memory_usage); + atomic_u64_fetch_add_u64(&G.cache.memory_usage, e->memory_usage); if (success) { logf_success("Loaded sprite texture [%F] \"%F\" in %F seconds (cache size: %F bytes).", @@ -398,7 +398,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t FMT_UINT(e->memory_usage)); } - atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_LOADED); + atomic_i32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED); #if RESOURCE_RELOADING struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT]; @@ -406,7 +406,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t { for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) { if (old_entry != e && old_entry->hash.v == e->hash.v) { - atomic_i32_eval_exchange(&old_entry->out_of_date, 1); + atomic_i32_fetch_set(&old_entry->out_of_date, 1); } } e->load_time_ns = sys_time_ns(); @@ -669,7 +669,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag struct arena_temp scratch = scratch_begin_no_conflict(); struct cache_entry *e = ref.e; - atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_WORKING); + atomic_i32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING); struct string path = tag.path; logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path)); @@ -709,7 +709,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag } arena_set_readonly(e->arena); e->memory_usage = e->arena->committed; - atomic_u64_eval_add_u64(&G.cache.memory_usage, e->memory_usage); + atomic_u64_fetch_add_u64(&G.cache.memory_usage, e->memory_usage); if (success) { logf_success("Loaded sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).", @@ -719,7 +719,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag FMT_UINT(e->memory_usage)); } - atomic_i32_eval_exchange(&e->state, 
CACHE_ENTRY_STATE_LOADED); + atomic_i32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED); #if RESOURCE_RELOADING struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT]; @@ -727,7 +727,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag { for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) { if (old_entry != e && old_entry->hash.v == e->hash.v) { - atomic_i32_eval_exchange(&old_entry->out_of_date, 1); + atomic_i32_fetch_set(&old_entry->out_of_date, 1); } } e->load_time_ns = sys_time_ns(); @@ -744,14 +744,14 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag INTERNAL void refcount_add(struct cache_entry *e, i32 amount) { - i32 evictor_cycle = atomic_i32_eval(&G.evictor_cycle); + i32 evictor_cycle = atomic_i32_fetch(&G.evictor_cycle); struct atomic_u64 *refcount_atomic = &e->refcount_struct; - u64 old_refcount_uncast = atomic_u64_eval(refcount_atomic); + u64 old_refcount_uncast = atomic_u64_fetch(refcount_atomic); do { struct cache_refcount new_refcount = *(struct cache_refcount *)&old_refcount_uncast; new_refcount.count += amount; new_refcount.last_ref_cycle = evictor_cycle; - u64 v = atomic_u64_eval_compare_exchange(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount); + u64 v = atomic_u64_fetch_test_set(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount); if (v != old_refcount_uncast) { old_refcount_uncast = v; } else { @@ -814,7 +814,7 @@ struct sprite_scope *sprite_scope_begin(void) struct sprite_scope_cache_ref **bins = NULL; struct sprite_scope_cache_ref *pool = NULL; { - while (atomic_i32_eval_compare_exchange(&G.scopes_lock, 0, 1) != 0) ix_pause(); + while (atomic_i32_fetch_test_set(&G.scopes_lock, 0, 1) != 0) ix_pause(); { if (G.first_free_scope) { res = G.first_free_scope; @@ -827,7 +827,7 @@ struct sprite_scope *sprite_scope_begin(void) pool = arena_push_array_no_zero(G.scopes_arena, struct sprite_scope_cache_ref, 
MAX_SCOPE_REFERENCES); } } - atomic_i32_eval_exchange(&G.scopes_lock, 0); + atomic_i32_fetch_set(&G.scopes_lock, 0); } MEMZERO_STRUCT(res); MEMZERO(bins, sizeof(*bins) * CACHE_BINS_COUNT); @@ -846,12 +846,12 @@ void sprite_scope_end(struct sprite_scope *scope) } /* Release scope */ - while (atomic_i32_eval_compare_exchange(&G.scopes_lock, 0, 1) != 0) ix_pause(); + while (atomic_i32_fetch_test_set(&G.scopes_lock, 0, 1) != 0) ix_pause(); { scope->next_free = G.first_free_scope; G.first_free_scope = scope; } - atomic_i32_eval_exchange(&G.scopes_lock, 0); + atomic_i32_fetch_set(&G.scopes_lock, 0); } /* ========================== * @@ -873,7 +873,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope, enum cache_entry_state match_state = CACHE_ENTRY_STATE_NONE; for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) { if (entry->hash.v == hash.v) { - enum cache_entry_state entry_state = atomic_i32_eval(&entry->state); + enum cache_entry_state entry_state = atomic_i32_fetch(&entry->state); if (!match || entry_state > match_state || (entry_state == CACHE_ENTRY_STATE_LOADED && match_state == CACHE_ENTRY_STATE_LOADED && entry->load_time_ns > match->load_time_ns)) { match = entry; match_state = entry_state; @@ -987,7 +987,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_ struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind, false); struct cache_ref ref = scope_ref->ref; - enum cache_entry_state state = atomic_i32_eval(&ref.e->state); + enum cache_entry_state state = atomic_i32_fetch(&ref.e->state); if (state == CACHE_ENTRY_STATE_LOADED) { switch (kind) { case CACHE_ENTRY_KIND_TEXTURE: { res = ref.e->texture; } break; @@ -996,7 +996,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_ } } else if (state == CACHE_ENTRY_STATE_NONE) { /* If entry is new, load texture */ - if (atomic_i32_eval_compare_exchange(&ref.e->state, 
CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) { + if (atomic_i32_fetch_test_set(&ref.e->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) { /* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */ if (await) { switch (kind) { @@ -1019,7 +1019,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_ /* Spinlock until result is ready */ if (await && state != CACHE_ENTRY_STATE_LOADED) { - while (atomic_i32_eval(&ref.e->state) != CACHE_ENTRY_STATE_LOADED) { + while (atomic_i32_fetch(&ref.e->state) != CACHE_ENTRY_STATE_LOADED) { ix_pause(); } } @@ -1238,10 +1238,10 @@ INTERNAL JOB_DEF(sprite_evictor_job, _) u64 evict_array_count = 0; struct evict_node *evict_array = arena_push_dry(scratch.arena, struct evict_node); { - i32 cur_cycle = atomic_i32_eval(&G.evictor_cycle); + i32 cur_cycle = atomic_i32_fetch(&G.evictor_cycle); /* Scan for evictable nodes */ - b32 cache_over_budget_threshold = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_THRESHOLD; + b32 cache_over_budget_threshold = atomic_u64_fetch(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_THRESHOLD; if (cache_over_budget_threshold || RESOURCE_RELOADING) { __profscope(Evictor scan); for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) { @@ -1250,12 +1250,12 @@ INTERNAL JOB_DEF(sprite_evictor_job, _) { struct cache_entry *n = bin->first; while (n) { - u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct); + u64 refcount_uncast = atomic_u64_fetch(&n->refcount_struct); struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast; if (refcount.count <= 0) { /* Add node to evict list */ #if RESOURCE_RELOADING - b32 is_out_of_date = atomic_i32_eval(&n->out_of_date); + b32 is_out_of_date = atomic_i32_fetch(&n->out_of_date); #else b32 is_out_of_date = false; #endif @@ -1298,10 +1298,10 @@ INTERNAL JOB_DEF(sprite_evictor_job, _) struct cache_bin *bin = 
en->cache_bin; struct cache_entry *entry = en->cache_entry; i32 last_ref_cycle = en->last_ref_cycle; - b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET; + b32 cache_over_budget_target = atomic_u64_fetch(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET; struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex); { - u64 refcount_uncast = atomic_u64_eval(&entry->refcount_struct); + u64 refcount_uncast = atomic_u64_fetch(&entry->refcount_struct); struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast; if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle)) { /* Cache node has been referenced since scan, skip node. */ @@ -1320,7 +1320,7 @@ INTERNAL JOB_DEF(sprite_evictor_job, _) bin->last = prev; } - atomic_u64_eval_add_i64(&G.cache.memory_usage, -((i64)entry->memory_usage)); + atomic_u64_fetch_add_i64(&G.cache.memory_usage, -((i64)entry->memory_usage)); /* Add to evicted list */ en->next_evicted = first_evicted; @@ -1360,7 +1360,7 @@ INTERNAL JOB_DEF(sprite_evictor_job, _) } } } - atomic_i32_eval_add(&G.evictor_cycle, 1); + atomic_i32_fetch_add(&G.evictor_cycle, 1); scratch_end(scratch); } diff --git a/src/sys_win32-old.c b/src/sys_win32-old.c index 211e3b1e..4dbeed08 100644 --- a/src/sys_win32-old.c +++ b/src/sys_win32-old.c @@ -960,14 +960,14 @@ INTERNAL SYS_THREAD_DEF(window_thread_entry_point, arg) sync_flag_set(&window->ready_sf); - while (!atomic_i32_eval(&window->event_thread_shutdown)) { + while (!atomic_i32_fetch(&window->event_thread_shutdown)) { MSG msg = ZI; { GetMessageW(&msg, 0, 0, 0); } { __profscope(Process window message); - if (atomic_i32_eval(&window->event_thread_shutdown)) { + if (atomic_i32_fetch(&window->event_thread_shutdown)) { break; } @@ -1073,7 +1073,7 @@ INTERNAL void win32_window_release(struct win32_window *window) G.first_free_window = window; /* Stop window thread */ - atomic_i32_eval_exchange(&window->event_thread_shutdown, 
1); + atomic_i32_fetch_set(&window->event_thread_shutdown, 1); win32_window_wake(window); sys_thread_wait_release(window->event_thread); @@ -1631,7 +1631,7 @@ void sys_mutex_release(struct sys_mutex *mutex) { __prof; struct win32_mutex *m = (struct win32_mutex *)mutex; - ASSERT(atomic_i64_eval(&m->count) == 0); /* Mutex should be unlocked */ + ASSERT(atomic_i64_fetch(&m->count) == 0); /* Mutex should be unlocked */ { struct sys_lock lock = sys_mutex_lock_e(G.mutexes_mutex); m->next_free = G.first_free_mutex; @@ -1648,7 +1648,7 @@ struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex) __proflock_after_exclusive_lock(m->profiling_ctx); #if RTC m->owner_tid = (u64)GetCurrentThreadId(); - atomic_i64_eval_add(&m->count, 1); + atomic_i64_fetch_add(&m->count, 1); #endif struct sys_lock lock = ZI; lock.exclusive = true; @@ -1663,7 +1663,7 @@ struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex) AcquireSRWLockShared((SRWLOCK *)&m->srwlock); __proflock_after_shared_lock(m->profiling_ctx); #if RTC - atomic_i64_eval_add(&m->count, 1); + atomic_i64_fetch_add(&m->count, 1); #endif struct sys_lock lock = ZI; lock.mutex = mutex; @@ -1674,7 +1674,7 @@ void sys_mutex_unlock(struct sys_lock *lock) { struct win32_mutex *m = (struct win32_mutex *)lock->mutex; #if RTC - atomic_i64_eval_add(&m->count, -1); + atomic_i64_fetch_add(&m->count, -1); m->owner_tid = 0; #endif if (lock->exclusive) { @@ -1745,7 +1745,7 @@ void sys_condition_variable_release(struct sys_condition_variable *sys_cv) __prof; struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv; /* Condition variable must not have any sleepers (signal before releasing) */ - ASSERT(atomic_i64_eval(&cv->num_waiters) == 0); + ASSERT(atomic_i64_fetch(&cv->num_waiters) == 0); win32_condition_variable_release(cv); } @@ -1755,11 +1755,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *sys_cv, struct s struct win32_mutex *m = (struct win32_mutex *)lock->mutex; b32 exclusive = 
lock->exclusive; #if RTC - atomic_i64_eval_add(&cv->num_waiters, 1); + atomic_i64_fetch_add(&cv->num_waiters, 1); if (exclusive) { m->owner_tid = 0; } - atomic_i64_eval_add(&m->count, -1); + atomic_i64_fetch_add(&m->count, -1); #endif /* TODO: Correct profiling of internal condition variable sleep / wait mutex state */ @@ -1778,11 +1778,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *sys_cv, struct s } #if RTC - atomic_i64_eval_add(&m->count, 1); + atomic_i64_fetch_add(&m->count, 1); if (exclusive) { m->owner_tid = (u64)GetCurrentThreadId(); } - atomic_i64_eval_add(&cv->num_waiters, -1); + atomic_i64_fetch_add(&cv->num_waiters, -1); #endif } @@ -1792,11 +1792,11 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *sys_cv, str struct win32_mutex *m = (struct win32_mutex *)lock->mutex; b32 exclusive = lock->exclusive; #if RTC - atomic_i64_eval_add(&cv->num_waiters, 1); + atomic_i64_fetch_add(&cv->num_waiters, 1); if (exclusive) { m->owner_tid = 0; } - atomic_i64_eval_add(&m->count, -1); + atomic_i64_fetch_add(&m->count, -1); #endif u32 ms = (u32)math_round_to_int((f32)seconds * 1000.f); @@ -1816,11 +1816,11 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *sys_cv, str } #if RTC - atomic_i64_eval_add(&m->count, 1); + atomic_i64_fetch_add(&m->count, 1); if (exclusive) { m->owner_tid = (u64)GetCurrentThreadId(); } - atomic_i64_eval_add(&cv->num_waiters, -1); + atomic_i64_fetch_add(&cv->num_waiters, -1); #endif } @@ -2167,7 +2167,7 @@ void sys_exit(void) void sys_panic(struct string msg) { - if (atomic_i32_eval_compare_exchange(&G.panicking, 0, 1) == 0) { + if (atomic_i32_fetch_test_set(&G.panicking, 0, 1) == 0) { log_panic(msg); wchar_t *wstr = G.panic_wstr; @@ -2504,7 +2504,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, } /* Find any dangling threads that haven't exited gracefully by now */ - if (!atomic_i32_eval(&G.panicking)) { + if (!atomic_i32_fetch(&G.panicking)) 
{ struct sys_lock lock = sys_mutex_lock_s(G.threads_mutex); if (G.threads_first) { struct arena_temp scratch = scratch_begin_no_conflict(); @@ -2524,7 +2524,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, } /* Check if panicking */ - if (atomic_i32_eval(&G.panicking)) { + if (atomic_i32_fetch(&G.panicking)) { /* Wait for panic message to be ready */ WaitForSingleObject(G.panic_event, INFINITE); /* Set error and abort */ diff --git a/src/sys_win32.c b/src/sys_win32.c index 0340b518..e293bb65 100644 --- a/src/sys_win32.c +++ b/src/sys_win32.c @@ -960,14 +960,14 @@ INTERNAL SYS_THREAD_DEF(window_thread_entry_point, arg) sync_flag_set(&window->ready_sf); - while (!atomic_i32_eval(&window->event_thread_shutdown)) { + while (!atomic_i32_fetch(&window->event_thread_shutdown)) { MSG msg = ZI; { GetMessageW(&msg, 0, 0, 0); } { __profscope(Process window message); - if (atomic_i32_eval(&window->event_thread_shutdown)) { + if (atomic_i32_fetch(&window->event_thread_shutdown)) { break; } @@ -1073,7 +1073,7 @@ INTERNAL void win32_window_release(struct win32_window *window) G.first_free_window = window; /* Stop window thread */ - atomic_i32_eval_exchange(&window->event_thread_shutdown, 1); + atomic_i32_fetch_set(&window->event_thread_shutdown, 1); win32_window_wake(window); sys_thread_wait_release(window->event_thread); @@ -1631,7 +1631,7 @@ void sys_mutex_release(struct sys_mutex *mutex) { __prof; struct win32_mutex *m = (struct win32_mutex *)mutex; - ASSERT(atomic_i64_eval(&m->count) == 0); /* Mutex should be unlocked */ + ASSERT(atomic_i64_fetch(&m->count) == 0); /* Mutex should be unlocked */ { struct sys_lock lock = sys_mutex_lock_e(G.mutexes_mutex); m->next_free = G.first_free_mutex; @@ -1648,7 +1648,7 @@ struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex) __proflock_after_exclusive_lock(m->profiling_ctx); #if RTC m->owner_tid = (u64)GetCurrentThreadId(); - atomic_i64_eval_add(&m->count, 1); + atomic_i64_fetch_add(&m->count, 1); 
#endif struct sys_lock lock = ZI; lock.exclusive = true; @@ -1663,7 +1663,7 @@ struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex) AcquireSRWLockShared((SRWLOCK *)&m->srwlock); __proflock_after_shared_lock(m->profiling_ctx); #if RTC - atomic_i64_eval_add(&m->count, 1); + atomic_i64_fetch_add(&m->count, 1); #endif struct sys_lock lock = ZI; lock.mutex = mutex; @@ -1674,7 +1674,7 @@ void sys_mutex_unlock(struct sys_lock *lock) { struct win32_mutex *m = (struct win32_mutex *)lock->mutex; #if RTC - atomic_i64_eval_add(&m->count, -1); + atomic_i64_fetch_add(&m->count, -1); m->owner_tid = 0; #endif if (lock->exclusive) { @@ -1745,7 +1745,7 @@ void sys_condition_variable_release(struct sys_condition_variable *sys_cv) __prof; struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv; /* Condition variable must not have any sleepers (signal before releasing) */ - ASSERT(atomic_i64_eval(&cv->num_waiters) == 0); + ASSERT(atomic_i64_fetch(&cv->num_waiters) == 0); win32_condition_variable_release(cv); } @@ -1755,11 +1755,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *sys_cv, struct s struct win32_mutex *m = (struct win32_mutex *)lock->mutex; b32 exclusive = lock->exclusive; #if RTC - atomic_i64_eval_add(&cv->num_waiters, 1); + atomic_i64_fetch_add(&cv->num_waiters, 1); if (exclusive) { m->owner_tid = 0; } - atomic_i64_eval_add(&m->count, -1); + atomic_i64_fetch_add(&m->count, -1); #endif /* TODO: Correct profiling of internal condition variable sleep / wait mutex state */ @@ -1778,11 +1778,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *sys_cv, struct s } #if RTC - atomic_i64_eval_add(&m->count, 1); + atomic_i64_fetch_add(&m->count, 1); if (exclusive) { m->owner_tid = (u64)GetCurrentThreadId(); } - atomic_i64_eval_add(&cv->num_waiters, -1); + atomic_i64_fetch_add(&cv->num_waiters, -1); #endif } @@ -1792,11 +1792,11 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *sys_cv, str struct 
win32_mutex *m = (struct win32_mutex *)lock->mutex; b32 exclusive = lock->exclusive; #if RTC - atomic_i64_eval_add(&cv->num_waiters, 1); + atomic_i64_fetch_add(&cv->num_waiters, 1); if (exclusive) { m->owner_tid = 0; } - atomic_i64_eval_add(&m->count, -1); + atomic_i64_fetch_add(&m->count, -1); #endif u32 ms = (u32)math_round_to_int((f32)seconds * 1000.f); @@ -1816,11 +1816,11 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *sys_cv, str } #if RTC - atomic_i64_eval_add(&m->count, 1); + atomic_i64_fetch_add(&m->count, 1); if (exclusive) { m->owner_tid = (u64)GetCurrentThreadId(); } - atomic_i64_eval_add(&cv->num_waiters, -1); + atomic_i64_fetch_add(&cv->num_waiters, -1); #endif } @@ -2167,7 +2167,7 @@ void sys_exit(void) void sys_panic(struct string msg) { - if (atomic_i32_eval_compare_exchange(&G.panicking, 0, 1) == 0) { + if (atomic_i32_fetch_test_set(&G.panicking, 0, 1) == 0) { log_panic(msg); wchar_t *wstr = G.panic_wstr; @@ -2402,14 +2402,14 @@ GLOBAL struct { INTERNAL void atomic_lock(void) { - while (atomic_i32_eval_compare_exchange(&g_test.lock, 0, 1) != 0) { + while (atomic_i32_fetch_test_set(&g_test.lock, 0, 1) != 0) { ix_pause(); } } INTERNAL void atomic_unlock(void) { - atomic_i32_eval_exchange(&g_test.lock, 0); + atomic_i32_fetch_set(&g_test.lock, 0); } INTERNAL void push_job(tjob_func *func, void *arg) @@ -2804,7 +2804,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, } /* Find any dangling threads that haven't exited gracefully by now */ - if (!atomic_i32_eval(&G.panicking)) { + if (!atomic_i32_fetch(&G.panicking)) { struct sys_lock lock = sys_mutex_lock_s(G.threads_mutex); if (G.threads_first) { struct arena_temp scratch = scratch_begin_no_conflict(); @@ -2824,7 +2824,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, } /* Check if panicking */ - if (atomic_i32_eval(&G.panicking)) { + if (atomic_i32_fetch(&G.panicking)) { /* Wait for panic message to be ready 
*/ WaitForSingleObject(G.panic_event, INFINITE); /* Set error and abort */ diff --git a/src/thread_local.c b/src/thread_local.c index 4796c3cf..56a838e7 100644 --- a/src/thread_local.c +++ b/src/thread_local.c @@ -17,14 +17,14 @@ GLOBAL struct { INTERNAL void metas_lock(void) { /* Spinlock */ - while (atomic_i32_eval_compare_exchange(&G.metas_lock_flag, 0, 1) == 0) { + while (atomic_i32_fetch_test_set(&G.metas_lock_flag, 0, 1) == 0) { ix_pause(); } } INTERNAL void metas_unlock(void) { - atomic_i32_eval_exchange(&G.metas_lock_flag, 0); + atomic_i32_fetch_set(&G.metas_lock_flag, 0); } struct thread_local_store thread_local_store_alloc(void) @@ -56,23 +56,23 @@ void thread_local_store_release(struct thread_local_store *t) arena_release(t->arena); } -volatile void *_thread_local_var_eval(struct thread_local_var_meta *meta) +volatile void *_thread_local_var_fetch(struct thread_local_var_meta *meta) { /* Register var if unregistered */ u64 id; { - u64 id_plus_one = atomic_u64_eval(&meta->id_plus_one); + u64 id_plus_one = atomic_u64_fetch(&meta->id_plus_one); if (id_plus_one == 0) { __profscope(Register thread local var); metas_lock(); { - id_plus_one = atomic_u64_eval(&meta->id_plus_one); /* Reevaluate now that we've locked */ + id_plus_one = atomic_u64_fetch(&meta->id_plus_one); /* Reevaluate now that we've locked */ if (id_plus_one == 0) { id = G.metas_count++; if (id >= MAX_THREAD_LOCAL_VARS) { sys_panic(LIT("Maximum number of thread local variables reached")); } - atomic_u64_eval_exchange(&meta->id_plus_one, id + 1); + atomic_u64_fetch_set(&meta->id_plus_one, id + 1); G.metas[id] = *meta; } else { id = id_plus_one - 1; diff --git a/src/thread_local.h b/src/thread_local.h index 1c5ef45e..022d1af9 100644 --- a/src/thread_local.h +++ b/src/thread_local.h @@ -55,11 +55,11 @@ struct thread_local_var_meta { } #if TYPEOF_DEFINED -# define thread_local_var_eval(var_ptr) (typeof((var_ptr)->_t))(_thread_local_var_eval(&(var_ptr)->meta)) +# define 
thread_local_var_fetch(var_ptr) (typeof((var_ptr)->_t))(_thread_local_var_fetch(&(var_ptr)->meta)) #else -# define thread_local_var_eval(var_ptr) (void *)(_thread_local_var_eval(&(var_ptr)->meta)) +# define thread_local_var_fetch(var_ptr) (void *)(_thread_local_var_fetch(&(var_ptr)->meta)) #endif -volatile void *_thread_local_var_eval(struct thread_local_var_meta *meta); +volatile void *_thread_local_var_fetch(struct thread_local_var_meta *meta); #endif diff --git a/src/user.c b/src/user.c index 3510cf0a..10532191 100644 --- a/src/user.c +++ b/src/user.c @@ -273,7 +273,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(user_shutdown) __prof; sys_window_unregister_event_callback(G.window, &window_event_callback); - atomic_i32_eval_exchange(&G.shutdown, true); + atomic_i32_fetch_set(&G.shutdown, true); } /* ========================== * @@ -653,13 +653,13 @@ INTERNAL void user_update(void) struct sim_snapshot *newest_snapshot = sim_snapshot_from_tick(G.user_unblended_client, G.user_unblended_client->last_tick); G.local_sim_last_known_time_ns = newest_snapshot->sim_time_ns; G.local_sim_last_known_tick = newest_snapshot->tick; - if (atomic_i32_eval(&G.user_paused)) { + if (atomic_i32_fetch(&G.user_paused)) { G.local_sim_predicted_time_ns = G.local_sim_last_known_tick; } else { G.local_sim_predicted_time_ns = newest_snapshot->sim_time_ns + (newest_snapshot->sim_dt_ns * tick_progress); } - if (USER_INTERP_ENABLED && !atomic_i32_eval(&G.user_paused)) { + if (USER_INTERP_ENABLED && !atomic_i32_fetch(&G.user_paused)) { /* Determine render time */ G.render_time_target_ns = G.local_sim_predicted_time_ns - (USER_INTERP_RATIO * G.average_local_to_user_snapshot_publish_dt_ns); if (G.average_local_to_user_snapshot_publish_dt_ns > 0) { @@ -1852,9 +1852,9 @@ INTERNAL void user_update(void) } if (pause_state.num_presses) { - atomic_i32_eval_xor(&G.user_paused, 1); + atomic_i32_fetch_xor(&G.user_paused, 1); } - atomic_i32_eval_add(&G.user_paused_steps, step_state.num_presses_and_repeats); + 
atomic_i32_fetch_add(&G.user_paused_steps, step_state.num_presses_and_repeats); /* Set user sim control */ { @@ -2113,7 +2113,7 @@ INTERNAL JOB_DEF(user_job, _) i64 last_frame_ns = 0; i64 target_dt_ns = NS_FROM_SECONDS(USER_FPS_LIMIT > (0) ? (1.0 / USER_FPS_LIMIT) : 0); - while (!atomic_i32_eval(&G.shutdown)) { + while (!atomic_i32_fetch(&G.shutdown)) { { __profscope(User sleep); sleep_frame(last_frame_ns, target_dt_ns); @@ -2273,7 +2273,7 @@ INTERNAL JOB_DEF(local_sim_job, _) i64 real_dt_ns = 0; i64 step_dt_ns = NS_FROM_SECONDS(1) / SIM_TICKS_PER_SECOND; f64 compute_timescale = 1.0; - while (!atomic_i32_eval(&G.shutdown)) { + while (!atomic_i32_fetch(&G.shutdown)) { struct arena_temp scratch = scratch_begin_no_conflict(); { __profscope(Sim sleep); @@ -2444,10 +2444,10 @@ INTERNAL JOB_DEF(local_sim_job, _) } } - b32 should_step = !atomic_i32_eval(&G.user_paused); - if (atomic_i32_eval(&G.user_paused_steps) > 0) { + b32 should_step = !atomic_i32_fetch(&G.user_paused); + if (atomic_i32_fetch(&G.user_paused_steps) > 0) { should_step = true; - atomic_i32_eval_add(&G.user_paused_steps, -1); + atomic_i32_fetch_add(&G.user_paused_steps, -1); } if (!should_step) {