From b50ba7d4e2eb72c836c4abbc34a9db66a7663efe Mon Sep 17 00:00:00 2001 From: jacob Date: Tue, 17 Jun 2025 23:30:59 -0500 Subject: [PATCH] move mutexes & cvs into opaque ptrs --- src/app.c | 6 +- src/arena.c | 6 +- src/asset_cache.c | 16 +-- src/font.c | 6 +- src/gpu_dx11.c | 24 ++--- src/host.c | 6 +- src/host.h | 2 +- src/log.c | 6 +- src/mixer.c | 18 ++-- src/resource.c | 24 ++--- src/sock_win32.c | 6 +- src/sound.c | 6 +- src/sprite.c | 44 ++++----- src/sys.h | 34 ++----- src/sys_win32.c | 242 +++++++++++++++++++++++++++------------------- src/user.c | 24 ++--- src/util.h | 16 +-- src/work.c | 60 ++++++------ 18 files changed, 285 insertions(+), 261 deletions(-) diff --git a/src/app.c b/src/app.c index 05e7e147..2e95d474 100644 --- a/src/app.c +++ b/src/app.c @@ -37,7 +37,7 @@ GLOBAL struct { struct sync_flag exit_sf; /* Exit callbacks */ - struct sys_mutex exit_callbacks_mutex; + struct sys_mutex *exit_callbacks_mutex; struct arena *exit_callbacks_arena; struct exit_callback *exit_callbacks_head; } G = ZI, DEBUG_ALIAS(G, G_app); @@ -106,7 +106,7 @@ INTERNAL struct sys_window_settings default_window_settings(struct sys_window *w void app_register_exit_callback(app_exit_callback_func *func) { - struct sys_lock lock = sys_mutex_lock_e(&G.exit_callbacks_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.exit_callbacks_mutex); struct exit_callback *callback = arena_push(G.exit_callbacks_arena, struct exit_callback); callback->func = func; callback->next = G.exit_callbacks_head; @@ -350,7 +350,7 @@ void app_entry_point(struct string args_str) * if something gets stuck) */ { __profscope(Run exit callbacks); - struct sys_lock lock = sys_mutex_lock_e(&G.exit_callbacks_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.exit_callbacks_mutex); for (struct exit_callback *callback = G.exit_callbacks_head; callback; callback = callback->next) { callback->func(); } diff --git a/src/arena.c b/src/arena.c index f3c2c49e..8f12e02a 100644 --- a/src/arena.c +++ b/src/arena.c @@ 
-53,7 +53,7 @@ void arena_release(struct arena *arena) { ASAN_UNPOISON(arena->reserve_, arena->committed + ARENA_HEADER_SIZE); __prof; - __proffree(arena->reserve_base); + __proffree(arena); gstat_sub(GSTAT_MEMORY_COMMITTED, arena->committed); gstat_sub(GSTAT_MEMORY_RESERVED, arena->reserved); gstat_sub(GSTAT_NUM_ARENAS, 1); @@ -92,8 +92,8 @@ void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align) } arena->committed += commit_bytes; gstat_add(GSTAT_MEMORY_COMMITTED, commit_bytes); - __proffree(arena->reserve_base); - __profalloc(arena->reserve_base, arena->committed + commit_bytes + ARENA_HEADER_SIZE); + __proffree(arena); + __profalloc(arena, arena->committed + commit_bytes + ARENA_HEADER_SIZE); ASAN_POISON(commit_address, commit_bytes); } diff --git a/src/asset_cache.c b/src/asset_cache.c index 87f8413b..075b7c41 100644 --- a/src/asset_cache.c +++ b/src/asset_cache.c @@ -16,18 +16,18 @@ #define ASSET_LOOKUP_TABLE_CAPACITY (MAX_ASSETS * 4) GLOBAL struct { - struct sys_mutex lookup_mutex; + struct sys_mutex *lookup_mutex; struct asset lookup[ASSET_LOOKUP_TABLE_CAPACITY]; u64 num_assets; - struct sys_mutex store_mutex; + struct sys_mutex *store_mutex; struct arena *store_arena; #if RTC /* Array of len `num_assets` pointing into populated entries of `lookup`. */ struct asset *dbg_table[ASSET_LOOKUP_TABLE_CAPACITY]; u64 dbg_table_count; - struct sys_mutex dbg_table_mutex; + struct sys_mutex *dbg_table_mutex; #endif } G = ZI, DEBUG_ALIAS(G, G_asset_cache); @@ -59,7 +59,7 @@ struct asset_cache_startup_receipt asset_cache_startup(struct work_startup_recei INTERNAL void refresh_dbg_table(void) { #if RTC - struct sys_lock lock = sys_mutex_lock_e(&G.dbg_table_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.dbg_table_mutex); MEMZERO_ARRAY(G.dbg_table); G.dbg_table_count = 0; for (u64 i = 0; i < ARRAY_COUNT(G.lookup); ++i) { @@ -76,7 +76,7 @@ INTERNAL void refresh_dbg_table(void) * Check returned slot->hash != 0 for presence. 
*/ INTERNAL struct asset *asset_cache_get_slot_locked(struct sys_lock *lock, struct string key, u64 hash) { - sys_assert_locked_e_or_s(lock, &G.lookup_mutex); + sys_assert_locked_e_or_s(lock, G.lookup_mutex); (UNUSED)lock; u64 index = hash % ARRAY_COUNT(G.lookup); @@ -123,14 +123,14 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch /* Lookup */ { - struct sys_lock lock = sys_mutex_lock_s(&G.lookup_mutex); + struct sys_lock lock = sys_mutex_lock_s(G.lookup_mutex); asset = asset_cache_get_slot_locked(&lock, key, hash); sys_mutex_unlock(&lock); } /* Insert if not found */ if (!asset->hash) { - struct sys_lock lock = sys_mutex_lock_e(&G.lookup_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.lookup_mutex); /* Re-check asset presence in case it was inserted since lock */ asset = asset_cache_get_slot_locked(&lock, key, hash); @@ -232,7 +232,7 @@ void *asset_cache_get_store_data(struct asset *asset) /* Asset store should be opened to allocate memory to the store arena */ struct asset_cache_store asset_cache_store_open(void) { - struct sys_lock lock = sys_mutex_lock_e(&G.store_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.store_mutex); struct asset_cache_store store = { .lock = lock, .arena = G.store_arena diff --git a/src/font.c b/src/font.c index 0e321e6b..5f1fc03c 100644 --- a/src/font.c +++ b/src/font.c @@ -26,7 +26,7 @@ struct font_task_params { struct font_task_params_store { struct font_task_params *head_free; struct arena *arena; - struct sys_mutex mutex; + struct sys_mutex *mutex; }; /* ========================== * @@ -67,7 +67,7 @@ INTERNAL struct font_task_params *font_task_params_alloc(void) { struct font_task_params *p = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.params.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.params.mutex); if (G.params.head_free) { p = G.params.head_free; G.params.head_free = p->next_free; @@ -81,7 +81,7 @@ INTERNAL struct font_task_params *font_task_params_alloc(void) INTERNAL 
void font_task_params_release(struct font_task_params *p) { - struct sys_lock lock = sys_mutex_lock_e(&G.params.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.params.mutex); p->next_free = G.params.head_free; G.params.head_free = p; sys_mutex_unlock(&lock); diff --git a/src/gpu_dx11.c b/src/gpu_dx11.c index c459c287..de1d0a05 100644 --- a/src/gpu_dx11.c +++ b/src/gpu_dx11.c @@ -213,7 +213,7 @@ struct dx11_shader_desc { /* Internal */ #if RESOURCE_RELOADING struct arena *includes_arena; - struct sys_mutex includes_mutex; + struct sys_mutex *includes_mutex; struct dict includes_dict; struct atomic_i32 is_dirty; #endif @@ -241,22 +241,22 @@ GLOBAL struct { ID3D11SamplerState *sampler_state; /* Buffer pool */ - struct sys_mutex buffers_mutex; + struct sys_mutex *buffers_mutex; struct arena *buffers_arena; struct dx11_buffer *first_free_buffer; /* Plan pool */ - struct sys_mutex plans_mutex; + struct sys_mutex *plans_mutex; struct arena *plans_arena; struct dx11_plan *first_free_plan; /* Dispatch state pool */ - struct sys_mutex dispatch_states_mutex; + struct sys_mutex *dispatch_states_mutex; struct arena *dispatch_states_arena; struct dx11_dispatch_state *first_free_dispatch_state; /* Texture pool */ - struct sys_mutex textures_mutex; + struct sys_mutex *textures_mutex; struct arena *textures_arena; struct dx11_texture *first_free_texture; @@ -701,7 +701,7 @@ INTERNAL void shader_add_include(struct dx11_shader_desc *desc, struct string in __prof; u64 hash = hash_fnv64(HASH_FNV64_BASIS, include_name_src); struct dict *dict = &desc->includes_dict; - struct sys_lock lock = sys_mutex_lock_e(&desc->includes_mutex); + struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex); { dict_set(desc->includes_arena, dict, hash, 1); } @@ -712,7 +712,7 @@ INTERNAL void shader_reset_includes(struct dx11_shader_desc *desc) { __prof; struct dict *dict = &desc->includes_dict; - struct sys_lock lock = sys_mutex_lock_e(&desc->includes_mutex); + struct sys_lock lock = 
sys_mutex_lock_e(desc->includes_mutex); { dict_reset(dict); } @@ -732,7 +732,7 @@ INTERNAL b32 shader_set_dirty(struct string name) } else { struct dict *includes_dict = &desc->includes_dict; u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); - struct sys_lock lock = sys_mutex_lock_e(&desc->includes_mutex); + struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex); { if (dict_get(includes_dict, hash) != 0) { atomic_i32_eval_exchange(&desc->is_dirty, 1); @@ -1039,7 +1039,7 @@ INTERNAL struct dx11_texture *dx11_texture_alloc(enum DXGI_FORMAT format, u32 fl { struct dx11_texture *t = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.textures_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.textures_mutex); if (G.first_free_texture) { t = G.first_free_texture; G.first_free_texture = t->next_free; @@ -1095,7 +1095,7 @@ INTERNAL struct dx11_texture *dx11_texture_alloc(enum DXGI_FORMAT format, u32 fl INTERNAL void dx11_texture_release(struct dx11_texture *t) { { - struct sys_lock lock = sys_mutex_lock_e(&G.textures_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.textures_mutex); t->next_free = G.first_free_texture; G.first_free_texture = t; sys_mutex_unlock(&lock); @@ -1169,7 +1169,7 @@ INTERNAL struct dx11_buffer *dx11_buffer_alloc(struct D3D11_BUFFER_DESC desc, D3 { struct arena *cpu_buffer_arena = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.buffers_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.buffers_mutex); if (G.first_free_buffer) { buffer = G.first_free_buffer; G.first_free_buffer = buffer->next_free; @@ -1291,7 +1291,7 @@ struct gpu_handle gpu_plan_alloc(void) struct arena *cpu_cmds_arena = NULL; struct arena *gpu_cmds_arena = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.plans_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.plans_mutex); if (G.first_free_plan) { plan = G.first_free_plan; G.first_free_plan = plan->next_free; diff --git a/src/host.c b/src/host.c index 3a767fee..9672bdd2 100644 --- a/src/host.c +++ b/src/host.c 
@@ -212,7 +212,7 @@ void host_release(struct host *host) while (!sys_thread_try_release(host->receiver_thread, 0.001f)) { sock_wake(host->sock); } - sys_mutex_release(&host->rcv_buffer_write_mutex); + sys_mutex_release(host->rcv_buffer_write_mutex); sock_release(host->sock); @@ -660,7 +660,7 @@ struct host_event_list host_update_begin(struct arena *arena, struct host *host) /* Swap read & write rcv buffers */ { - struct sys_lock lock = sys_mutex_lock_e(&host->rcv_buffer_write_mutex); + struct sys_lock lock = sys_mutex_lock_e(host->rcv_buffer_write_mutex); struct host_rcv_buffer *swp = host->rcv_buffer_read; host->rcv_buffer_read = host->rcv_buffer_write; host->rcv_buffer_write = swp; @@ -1083,7 +1083,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(host_receiver_thread_entry_point, arg) struct sock_address address = res.address; struct string data = res.data; if (data.len > 0) { - struct sys_lock lock = sys_mutex_lock_e(&host->rcv_buffer_write_mutex); + struct sys_lock lock = sys_mutex_lock_e(host->rcv_buffer_write_mutex); { struct host_rcv_buffer *rcv_buffer = host->rcv_buffer_write; struct host_rcv_packet *packet = arena_push(rcv_buffer->arena, struct host_rcv_packet); diff --git a/src/host.h b/src/host.h index 3af3c106..6b23b2e3 100644 --- a/src/host.h +++ b/src/host.h @@ -90,7 +90,7 @@ struct host { u64 num_msg_assembler_lookup_bins; /* Double buffer for incoming data */ - struct sys_mutex rcv_buffer_write_mutex; + struct sys_mutex *rcv_buffer_write_mutex; struct host_rcv_buffer *rcv_buffer_read; struct host_rcv_buffer *rcv_buffer_write; diff --git a/src/log.c b/src/log.c index 43e8434b..46a555b4 100644 --- a/src/log.c +++ b/src/log.c @@ -16,7 +16,7 @@ struct log_event_callback { GLOBAL struct { struct atomic_i32 initialized; - struct sys_mutex callbacks_mutex; + struct sys_mutex *callbacks_mutex; struct arena *callbacks_arena; struct log_event_callback *first_callback; struct log_event_callback *last_callback; @@ -84,7 +84,7 @@ void log_startup(struct string 
logfile_path) void log_register_callback(log_event_callback_func *func, i32 level) { if (!atomic_i32_eval(&G.initialized)) { return; } - struct sys_lock lock = sys_mutex_lock_e(&G.callbacks_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.callbacks_mutex); { struct log_event_callback *callback = arena_push(G.callbacks_arena, struct log_event_callback); callback->func = func; @@ -213,7 +213,7 @@ void _log(i32 level, struct string msg) event.line = line; #endif { - struct sys_lock lock = sys_mutex_lock_s(&G.callbacks_mutex); + struct sys_lock lock = sys_mutex_lock_s(G.callbacks_mutex); for (struct log_event_callback *callback = G.first_callback; callback; callback = callback->next) { if (level <= callback->level) { callback->func(event); diff --git a/src/mixer.c b/src/mixer.c index 05a3c516..70f08a98 100644 --- a/src/mixer.c +++ b/src/mixer.c @@ -52,7 +52,7 @@ struct track { }; GLOBAL struct { - struct sys_mutex mutex; + struct sys_mutex *mutex; /* Listener */ struct v2 listener_pos; @@ -104,7 +104,7 @@ INTERNAL struct track *track_from_handle(struct mixer_track_handle handle) INTERNAL struct track *track_alloc_locked(struct sys_lock *lock, struct sound *sound) { - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; struct track *track = NULL; @@ -143,7 +143,7 @@ INTERNAL struct track *track_alloc_locked(struct sys_lock *lock, struct sound *s INTERNAL void track_release_locked(struct sys_lock *lock, struct track *track) { - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; /* Remove from playing list */ @@ -189,7 +189,7 @@ struct mixer_track_handle mixer_play_ex(struct sound *sound, struct mixer_desc d { struct track *track; { - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { track = track_alloc_locked(&lock, sound); track->desc = desc; @@ -207,7 +207,7 @@ struct mixer_desc mixer_track_get(struct mixer_track_handle handle) struct 
track *track = track_from_handle(handle); if (track) { /* TODO: Only lock mutex on track itself or something */ - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { /* Confirm handle is still valid now that we're locked */ track = track_from_handle(handle); @@ -227,7 +227,7 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc) struct track *track = track_from_handle(handle); if (track) { /* TODO: Only lock mutex on track itself or something */ - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { /* Confirm handle is still valid now that we're locked */ track = track_from_handle(handle); @@ -241,7 +241,7 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc) void mixer_set_listener(struct v2 pos, struct v2 dir) { - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { G.listener_pos = pos; G.listener_dir = v2_norm(dir); @@ -282,7 +282,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count) struct mix **mixes = NULL; u64 mixes_count = 0; { - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); /* Read listener info */ listener_pos = G.listener_pos; @@ -470,7 +470,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count) { __profscope(update_track_effect_data); - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); for (u64 i = 0; i < mixes_count; ++i) { struct mix *mix = mixes[i]; struct track *track = track_from_handle(mix->track_handle); diff --git a/src/resource.c b/src/resource.c index b4ab9f2d..d8264237 100644 --- a/src/resource.c +++ b/src/resource.c @@ -29,12 +29,12 @@ GLOBAL struct { struct sys_watch *watch; struct atomic_i32 watch_shutdown; - struct sys_mutex watch_dispatcher_mutex; + struct sys_mutex 
*watch_dispatcher_mutex; struct arena *watch_dispatcher_info_arena; struct sys_watch_info_list watch_dispatcher_info_list; - struct sys_condition_variable watch_dispatcher_cv; + struct sys_condition_variable *watch_dispatcher_cv; - struct sys_mutex watch_callbacks_mutex; + struct sys_mutex *watch_callbacks_mutex; resource_watch_callback *watch_callbacks[64]; u64 num_watch_callbacks; #endif @@ -162,7 +162,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(resource_shutdown) __prof; atomic_i32_eval_exchange(&G.watch_shutdown, 1); - sys_condition_variable_broadcast(&G.watch_dispatcher_cv); + sys_condition_variable_broadcast(G.watch_dispatcher_cv); sys_watch_wake(G.watch); sys_thread_wait_release(G.resource_watch_dispatch_thread); @@ -171,7 +171,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(resource_shutdown) void resource_register_watch_callback(resource_watch_callback *callback) { - struct sys_lock lock = sys_mutex_lock_e(&G.watch_callbacks_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.watch_callbacks_mutex); { if (G.num_watch_callbacks < ARRAY_COUNT(G.watch_callbacks)) { G.watch_callbacks[G.num_watch_callbacks++] = callback; @@ -191,7 +191,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_monitor_thread_entry_poi struct arena_temp temp = arena_temp_begin(scratch.arena); struct sys_watch_info_list res = sys_watch_wait(temp.arena, G.watch); if (res.first && !atomic_i32_eval(&G.watch_shutdown)) { - struct sys_lock lock = sys_mutex_lock_e(&G.watch_dispatcher_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.watch_dispatcher_mutex); { struct sys_watch_info_list list_part = sys_watch_info_copy(G.watch_dispatcher_info_arena, res); if (G.watch_dispatcher_info_list.last) { @@ -203,7 +203,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_monitor_thread_entry_poi } } sys_mutex_unlock(&lock); - sys_condition_variable_broadcast(&G.watch_dispatcher_cv); + sys_condition_variable_broadcast(G.watch_dispatcher_cv); } arena_temp_end(temp); } @@ -224,15 +224,15 @@ 
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_dispatcher_thread_entry_ (UNUSED)_; struct arena_temp scratch = scratch_begin_no_conflict(); - struct sys_lock watch_dispatcher_lock = sys_mutex_lock_e(&G.watch_dispatcher_mutex); + struct sys_lock watch_dispatcher_lock = sys_mutex_lock_e(G.watch_dispatcher_mutex); while (!atomic_i32_eval(&G.watch_shutdown)) { - sys_condition_variable_wait(&G.watch_dispatcher_cv, &watch_dispatcher_lock); + sys_condition_variable_wait(G.watch_dispatcher_cv, &watch_dispatcher_lock); if (!atomic_i32_eval(&G.watch_shutdown) && G.watch_dispatcher_info_arena->pos > 0) { /* Unlock and sleep a bit so duplicate events pile up */ { sys_mutex_unlock(&watch_dispatcher_lock); sys_sleep(WATCH_DISPATCHER_DELAY_SECONDS); - watch_dispatcher_lock = sys_mutex_lock_e(&G.watch_dispatcher_mutex); + watch_dispatcher_lock = sys_mutex_lock_e(G.watch_dispatcher_mutex); } if (!atomic_i32_eval(&G.watch_shutdown)) { struct arena_temp temp = arena_temp_begin(scratch.arena); @@ -257,7 +257,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_dispatcher_thread_entry_ dict_set(temp.arena, &dedup_dict, hash, 1); } if (!skip) { - struct sys_lock callbacks_lock = sys_mutex_lock_s(&G.watch_callbacks_mutex); + struct sys_lock callbacks_lock = sys_mutex_lock_s(G.watch_callbacks_mutex); for (u64 i = 0; i < G.num_watch_callbacks; ++i) { resource_watch_callback *callback = G.watch_callbacks[i]; callback(info->name); @@ -266,7 +266,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_dispatcher_thread_entry_ } } } - watch_dispatcher_lock = sys_mutex_lock_e(&G.watch_dispatcher_mutex); + watch_dispatcher_lock = sys_mutex_lock_e(G.watch_dispatcher_mutex); arena_temp_end(temp); } diff --git a/src/sock_win32.c b/src/sock_win32.c index dfb5bc41..bbafa837 100644 --- a/src/sock_win32.c +++ b/src/sock_win32.c @@ -42,7 +42,7 @@ struct win32_sock { GLOBAL struct { WSADATA wsa_data; struct arena *win32_socks_arena; - struct sys_mutex win32_socks_mutex; + struct 
sys_mutex *win32_socks_mutex; struct win32_sock *first_free_win32_sock; } G = ZI, DEBUG_ALIAS(G, G_sock_win32); @@ -298,7 +298,7 @@ INTERNAL struct win32_sock *win32_sock_alloc(void) { struct win32_sock *ws = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.win32_socks_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.win32_socks_mutex); if (G.first_free_win32_sock) { ws = G.first_free_win32_sock; G.first_free_win32_sock = ws->next_free; @@ -313,7 +313,7 @@ INTERNAL struct win32_sock *win32_sock_alloc(void) INTERNAL void win32_sock_release(struct win32_sock *ws) { - struct sys_lock lock = sys_mutex_lock_e(&G.win32_socks_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.win32_socks_mutex); ws->next_free = G.first_free_win32_sock; G.first_free_win32_sock = ws; sys_mutex_unlock(&lock); diff --git a/src/sound.c b/src/sound.c index 5825e604..ee8ec0f3 100644 --- a/src/sound.c +++ b/src/sound.c @@ -20,7 +20,7 @@ struct sound_task_params { struct sound_task_params_store { struct sound_task_params *head_free; struct arena *arena; - struct sys_mutex mutex; + struct sys_mutex *mutex; }; /* ========================== * @@ -57,7 +57,7 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void) { struct sound_task_params *p = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.params.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.params.mutex); if (G.params.head_free) { p = G.params.head_free; G.params.head_free = p->next_free; @@ -71,7 +71,7 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void) INTERNAL void sound_task_params_release(struct sound_task_params *p) { - struct sys_lock lock = sys_mutex_lock_e(&G.params.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.params.mutex); p->next_free = G.params.head_free; G.params.head_free = p; sys_mutex_unlock(&lock); diff --git a/src/sprite.c b/src/sprite.c index 6085ef8f..25e40de9 100644 --- a/src/sprite.c +++ b/src/sprite.c @@ -91,7 +91,7 @@ struct cache_entry { }; struct cache_bin { - struct 
sys_mutex mutex; + struct sys_mutex *mutex; struct cache_entry *first; struct cache_entry *last; }; @@ -100,7 +100,7 @@ struct cache { struct atomic_u64 memory_usage; struct arena *arena; struct cache_bin *bins; - struct sys_mutex entry_pool_mutex; + struct sys_mutex *entry_pool_mutex; struct cache_entry *entry_pool_first_free; }; @@ -142,7 +142,7 @@ GLOBAL struct { struct cache cache; /* Load cmds */ - struct sys_mutex load_cmds_mutex; + struct sys_mutex *load_cmds_mutex; struct arena *load_cmds_arena; struct load_cmd *first_free_load_cmd; @@ -154,8 +154,8 @@ GLOBAL struct { /* Evictor thread */ struct atomic_i32 evictor_cycle; b32 evictor_shutdown; - struct sys_mutex evictor_mutex; - struct sys_condition_variable evictor_cv; + struct sys_mutex *evictor_mutex; + struct sys_condition_variable *evictor_cv; struct sys_thread *evictor_thread; } G = ZI, DEBUG_ALIAS(G, G_sprite); @@ -272,9 +272,9 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sprite_shutdown) __prof; /* Signal evictor shutdown */ { - struct sys_lock lock = sys_mutex_lock_e(&G.evictor_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.evictor_mutex); G.evictor_shutdown = true; - sys_condition_variable_broadcast(&G.evictor_cv); + sys_condition_variable_broadcast(G.evictor_cv); sys_mutex_unlock(&lock); } sys_thread_wait_release(G.evictor_thread); @@ -316,7 +316,7 @@ INTERNAL void push_load_task(struct cache_ref ref, struct sprite_tag tag) { struct load_cmd *cmd = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.load_cmds_mutex); if (G.first_free_load_cmd) { cmd = G.first_free_load_cmd; G.first_free_load_cmd = cmd->next_free; @@ -403,7 +403,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t #if RESOURCE_RELOADING struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT]; - struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); + struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex); { for (struct 
cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) { if (old_entry != e && old_entry->hash.v == e->hash.v) { @@ -724,7 +724,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag #if RESOURCE_RELOADING struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT]; - struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); + struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex); { for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) { if (old_entry != e && old_entry->hash.v == e->hash.v) { @@ -798,7 +798,7 @@ INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_unsafe(struct sprite_sc INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_entry(struct sprite_scope *scope, struct cache_entry *e, struct sys_lock *bin_lock) { /* Guaranteed safe if caller has lock on entry's bin, since entry may not have an existing reference and could otherwise be evicted while ensuring this reference */ - sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[e->hash.v % CACHE_BINS_COUNT].mutex); + sys_assert_locked_e_or_s(bin_lock, G.cache.bins[e->hash.v % CACHE_BINS_COUNT].mutex); return scope_ensure_ref_unsafe(scope, e); } @@ -864,7 +864,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope, struct sprite_scope_cache_ref *scope_ref = NULL; struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT]; - sys_assert_locked_e_or_s(bin_lock, &bin->mutex); /* Lock required for iterating bin */ + sys_assert_locked_e_or_s(bin_lock, bin->mutex); /* Lock required for iterating bin */ #if RESOURCE_RELOADING /* If resource reloading is enabled, then we want to find the @@ -921,7 +921,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope /* Search in cache */ if (!force_new) { - struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex); + struct sys_lock bin_lock = sys_mutex_lock_s(bin->mutex); { scope_ref = 
cache_lookup(scope, hash, &bin_lock); } @@ -930,7 +930,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope /* If not in cache, allocate new entry */ if (!scope_ref) { - struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); + struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex); { /* Search cache one more time in case an entry was allocated between locks */ if (!force_new) { @@ -941,7 +941,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope /* Cache entry still absent, allocate new entry */ struct cache_entry *entry = NULL; { - struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.entry_pool_mutex); + struct sys_lock pool_lock = sys_mutex_lock_e(G.cache.entry_pool_mutex); if (G.cache.entry_pool_first_free) { entry = G.cache.entry_pool_first_free; G.cache.entry_pool_first_free = entry->next_free; @@ -1150,7 +1150,7 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg) } /* Free cmd */ - struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.load_cmds_mutex); { sprite_scope_end(cmd->scope); cmd->next_free = G.first_free_load_cmd; @@ -1170,7 +1170,7 @@ INTERNAL void reload_if_exists(struct sprite_scope *scope, struct sprite_tag tag struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind); struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT]; struct sprite_scope_cache_ref *existing_ref = NULL; - struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex); + struct sys_lock bin_lock = sys_mutex_lock_s(bin->mutex); { existing_ref = cache_lookup(scope, hash, &bin_lock); } @@ -1233,7 +1233,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) { (UNUSED)arg; - struct sys_lock evictor_lock = sys_mutex_lock_e(&G.evictor_mutex); + struct sys_lock evictor_lock = sys_mutex_lock_e(G.evictor_mutex); while (!G.evictor_shutdown) { struct arena_temp scratch = scratch_begin_no_conflict(); @@ -1249,7 
+1249,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) __profscope(eviction_scan); for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) { struct cache_bin *bin = &G.cache.bins[i]; - struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex); + struct sys_lock bin_lock = sys_mutex_lock_s(bin->mutex); { struct cache_entry *n = bin->first; while (n) { @@ -1302,7 +1302,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) struct cache_entry *entry = en->cache_entry; i32 last_ref_cycle = en->last_ref_cycle; b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET; - struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); + struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex); { u64 refcount_uncast = atomic_u64_eval(&entry->refcount_struct); struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast; @@ -1353,7 +1353,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) /* Add evicted nodes to free list */ { __profscope(eviction_free_list_append); - struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.entry_pool_mutex); + struct sys_lock pool_lock = sys_mutex_lock_e(G.cache.entry_pool_mutex); for (struct evict_node *en = first_evicted; en; en = en->next_evicted) { struct cache_entry *n = en->cache_entry; n->next_free = G.cache.entry_pool_first_free; @@ -1367,7 +1367,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) scratch_end(scratch); /* Wait */ - sys_condition_variable_wait_time(&G.evictor_cv, &evictor_lock, SECONDS_FROM_NS(EVICTOR_CYCLE_INTERVAL_NS)); + sys_condition_variable_wait_time(G.evictor_cv, &evictor_lock, SECONDS_FROM_NS(EVICTOR_CYCLE_INTERVAL_NS)); } sys_mutex_unlock(&evictor_lock); } diff --git a/src/sys.h b/src/sys.h index 24890aac..530e44a2 100644 --- a/src/sys.h +++ b/src/sys.h @@ -361,23 +361,12 @@ void sys_window_cursor_disable_clip(struct sys_window *sys_window); 
* Mutex * ========================== */ -struct sys_mutex { - u64 handle; -#if PROFILING - struct __proflock_ctx *profiling_ctx; -#endif -#if RTC - u64 owner_tid; - struct atomic_i64 count; -#endif -}; - struct sys_lock { - b32 exclusive; struct sys_mutex *mutex; + b32 exclusive; }; -struct sys_mutex sys_mutex_alloc(void); +struct sys_mutex *sys_mutex_alloc(void); void sys_mutex_release(struct sys_mutex *mutex); struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex); struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex); @@ -395,19 +384,12 @@ void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex); * Condition variable * ========================== */ -struct sys_condition_variable { - u64 handle; -#if RTC - struct atomic_i64 num_waiters; -#endif -}; - -struct sys_condition_variable sys_condition_variable_alloc(void); -void sys_condition_variable_release(struct sys_condition_variable *cv); -void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_lock *lock); -void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct sys_lock *lock, f64 seconds); -void sys_condition_variable_signal(struct sys_condition_variable *cv, u32 count); -void sys_condition_variable_broadcast(struct sys_condition_variable *cv); +struct sys_condition_variable *sys_condition_variable_alloc(void); +void sys_condition_variable_release(struct sys_condition_variable *sys_cv); +void sys_condition_variable_wait(struct sys_condition_variable *sys_cv, struct sys_lock *lock); +void sys_condition_variable_wait_time(struct sys_condition_variable *sys_cv, struct sys_lock *lock, f64 seconds); +void sys_condition_variable_signal(struct sys_condition_variable *sys_cv, u32 count); +void sys_condition_variable_broadcast(struct sys_condition_variable *sys_cv); /* ========================== * * Thread local storage diff --git a/src/sys_win32.c b/src/sys_win32.c index 39662080..218a6eb5 100644 --- a/src/sys_win32.c +++ b/src/sys_win32.c @@ 
-33,6 +33,27 @@ #define SYS_WINDOW_EVENT_LISTENERS_MAX 512 #define WINDOW_CLASS_NAME L"power_play_window_class" +struct win32_mutex { + SRWLOCK srwlock; + struct win32_mutex *next_free; + +#if PROFILING + struct __proflock_ctx *profiling_ctx; +#endif +#if RTC + u64 owner_tid; + struct atomic_i64 count; +#endif +}; + +struct win32_condition_variable { + CONDITION_VARIABLE condition_variable; + struct win32_condition_variable *next_free; +#if RTC + struct atomic_i64 num_waiters; +#endif +}; + struct win32_thread { sys_thread_entry_point_func *entry_point; void *thread_data; @@ -45,11 +66,6 @@ struct win32_thread { HANDLE handle; }; -struct win32_condition_variable { - CONDITION_VARIABLE condition_variable; - struct win32_condition_variable *next_free; -}; - enum win32_window_cursor_set_flag { WIN32_WINDOW_CURSOR_SET_FLAG_NONE = 0x0, WIN32_WINDOW_CURSOR_SET_FLAG_POSITION = 0x1, @@ -68,7 +84,7 @@ struct win32_window { u16 utf16_high_surrogate_last_input; - struct sys_mutex settings_mutex; + struct sys_mutex *settings_mutex; struct sys_window_settings settings; i32 monitor_width; @@ -86,7 +102,7 @@ struct win32_window { struct atomic_i32 event_thread_shutdown; struct sys_thread *event_thread; - struct sys_mutex event_callbacks_mutex; + struct sys_mutex *event_callbacks_mutex; sys_window_event_callback_func *event_callbacks[SYS_WINDOW_EVENT_LISTENERS_MAX]; u64 event_callbacks_count; @@ -116,26 +132,31 @@ GLOBAL struct { /* Lookup tables */ enum sys_btn vk_btn_table[256]; - /* Condition variables */ - struct sys_mutex condition_variables_mutex; + /* Mutexes pool */ + struct sys_mutex *mutexes_mutex; + struct arena *mutexes_arena; + struct win32_mutex *first_free_mutex; + + /* Condition variables pool */ + struct sys_mutex *condition_variables_mutex; struct arena *condition_variables_arena; struct win32_condition_variable *first_free_condition_variable; - /* Thread params */ - struct sys_mutex threads_mutex; + /* Threads pool */ + struct sys_mutex *threads_mutex; struct 
arena *threads_arena; struct win32_thread *threads_first; struct win32_thread *threads_last; struct win32_thread *threads_first_free; - /* Watches */ - struct sys_mutex watches_mutex; + /* Watches pool */ + struct sys_mutex *watches_mutex; struct arena *watches_arena; struct win32_watch *watches_first_free; - /* Windows */ + /* Windows pool */ WNDCLASSEXW window_class; - struct sys_mutex windows_mutex; + struct sys_mutex *windows_mutex; struct arena *windows_arena; struct win32_window *first_free_window; } G = ZI, DEBUG_ALIAS(G, G_sys_win32); @@ -692,7 +713,7 @@ struct sys_watch *sys_watch_alloc(struct string dir_path) struct win32_watch *w32_watch = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.watches_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.watches_mutex); { if (G.watches_first_free) { w32_watch = G.watches_first_free; @@ -728,7 +749,7 @@ void sys_watch_release(struct sys_watch *dw) CloseHandle(w32_watch->dir_handle); CloseHandle(w32_watch->wake_handle); - struct sys_lock lock = sys_mutex_lock_e(&G.watches_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.watches_mutex); { w32_watch->next_free = G.watches_first_free; G.watches_first_free = w32_watch; @@ -879,7 +900,7 @@ INTERNAL void win32_window_wake(struct win32_window *window); INTERNAL void win32_window_process_event(struct win32_window *window, struct sys_event event) { __prof; - struct sys_lock lock = sys_mutex_lock_e(&window->event_callbacks_mutex); + struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex); for (u64 i = 0; i < window->event_callbacks_count; ++i) { window->event_callbacks[i](event); } @@ -1010,7 +1031,7 @@ INTERNAL struct win32_window *win32_window_alloc(void) { struct win32_window *window = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.windows_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.windows_mutex); if (G.first_free_window) { window = G.first_free_window; G.first_free_window = window->next_free; @@ -1039,7 +1060,7 @@ INTERNAL struct 
win32_window *win32_window_alloc(void) INTERNAL void win32_window_release(struct win32_window *window) { - struct sys_lock lock = sys_mutex_lock_e(&G.windows_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.windows_mutex); window->next_free = G.first_free_window; G.first_free_window = window; @@ -1050,8 +1071,8 @@ INTERNAL void win32_window_release(struct win32_window *window) sys_thread_wait_release(window->event_thread); /* Release mutexes */ - sys_mutex_release(&window->event_callbacks_mutex); - sys_mutex_release(&window->settings_mutex); + sys_mutex_release(window->event_callbacks_mutex); + sys_mutex_release(window->settings_mutex); /* Release sync flag */ sync_flag_release(&window->ready_sf); @@ -1427,7 +1448,7 @@ void sys_window_release(struct sys_window *sys_window) void sys_window_register_event_callback(struct sys_window *sys_window, sys_window_event_callback_func *func) { struct win32_window *window = (struct win32_window *)sys_window; - struct sys_lock lock = sys_mutex_lock_e(&window->event_callbacks_mutex); + struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex); { if (window->event_callbacks_count + 1 > ARRAY_COUNT(window->event_callbacks)) { sys_panic(LIT("Too many window event callbacks registered")); @@ -1442,7 +1463,7 @@ void sys_window_unregister_event_callback(struct sys_window *sys_window, sys_win { struct win32_window *window = (struct win32_window *)sys_window; - struct sys_lock lock = sys_mutex_lock_e(&window->event_callbacks_mutex); + struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex); { u64 count = window->event_callbacks_count; sys_window_event_callback_func *last = count > 0 ? 
window->event_callbacks[count - 1] : NULL; @@ -1464,7 +1485,7 @@ void sys_window_update_settings(struct sys_window *sys_window, struct sys_window { __prof; struct win32_window *window = (struct win32_window *)sys_window; - struct sys_lock lock = sys_mutex_lock_e(&window->settings_mutex); + struct sys_lock lock = sys_mutex_lock_e(window->settings_mutex); { win32_update_window_from_settings(window, settings); } @@ -1483,7 +1504,7 @@ void sys_window_show(struct sys_window *sys_window) { struct win32_window *window = (struct win32_window *)sys_window; HWND hwnd = window->hwnd; - struct sys_lock lock = sys_mutex_lock_e(&window->settings_mutex); + struct sys_lock lock = sys_mutex_lock_e(window->settings_mutex); { i32 show_cmd = SW_NORMAL; struct sys_window_settings *settings = &window->settings; @@ -1558,36 +1579,51 @@ void sys_window_cursor_disable_clip(struct sys_window *sys_window) * Mutex * ========================== */ -struct sys_mutex sys_mutex_alloc(void) +INTERNAL void win32_mutex_init(struct win32_mutex *m) +{ + MEMZERO_STRUCT(m); + __proflock_alloc(m->profiling_ctx); + m->srwlock = (SRWLOCK)SRWLOCK_INIT; +} + +struct sys_mutex *sys_mutex_alloc(void) { __prof; - struct sys_mutex mutex = ZI; - - __proflock_alloc(mutex.profiling_ctx); - SRWLOCK srwlock = SRWLOCK_INIT; - mutex.handle = *(u64 *)&srwlock; - - return mutex; + struct win32_mutex *m = NULL; + { + struct sys_lock lock = sys_mutex_lock_e(G.mutexes_mutex); + if (G.first_free_mutex) { + m = G.first_free_mutex; + G.first_free_mutex = m->next_free; + } else { + m = arena_push_no_zero(G.mutexes_arena, struct win32_mutex); + } + sys_mutex_unlock(&lock); + } + MEMZERO_STRUCT(m); + win32_mutex_init(m); + return (struct sys_mutex *)m; } void sys_mutex_release(struct sys_mutex *mutex) { __prof; - (UNUSED)mutex; - __proflock_release(mutex->profiling_ctx); + struct win32_mutex *m = (struct win32_mutex *)mutex; + __proflock_release(m->profiling_ctx); /* Mutex should be unlocked */ - 
ASSERT(atomic_i64_eval(&mutex->count) == 0); + ASSERT(atomic_i64_eval(&m->count) == 0); } struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex) { __prof; - __proflock_before_exclusive_lock(mutex->profiling_ctx); - AcquireSRWLockExclusive((SRWLOCK *)&mutex->handle); - __proflock_after_exclusive_lock(mutex->profiling_ctx); + struct win32_mutex *m = (struct win32_mutex *)mutex; + __proflock_before_exclusive_lock(m->profiling_ctx); + AcquireSRWLockExclusive((SRWLOCK *)&m->srwlock); + __proflock_after_exclusive_lock(m->profiling_ctx); #if RTC - mutex->owner_tid = (u64)GetCurrentThreadId(); - atomic_i64_eval_add(&mutex->count, 1); + m->owner_tid = (u64)GetCurrentThreadId(); + atomic_i64_eval_add(&m->count, 1); #endif struct sys_lock lock = ZI; lock.exclusive = true; @@ -1598,11 +1634,12 @@ struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex) struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex) { __prof; - __proflock_before_shared_lock(mutex->profiling_ctx); - AcquireSRWLockShared((SRWLOCK *)&mutex->handle); - __proflock_after_shared_lock(mutex->profiling_ctx); + struct win32_mutex *m = (struct win32_mutex *)mutex; + __proflock_before_shared_lock(m->profiling_ctx); + AcquireSRWLockShared((SRWLOCK *)&m->srwlock); + __proflock_after_shared_lock(m->profiling_ctx); #if RTC - atomic_i64_eval_add(&mutex->count, 1); + atomic_i64_eval_add(&m->count, 1); #endif struct sys_lock lock = ZI; lock.mutex = mutex; @@ -1612,16 +1649,17 @@ struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex) void sys_mutex_unlock(struct sys_lock *lock) { __prof; + struct win32_mutex *m = (struct win32_mutex *)lock->mutex; #if RTC - atomic_i64_eval_add(&lock->mutex->count, -1); - lock->mutex->owner_tid = 0; + atomic_i64_eval_add(&m->count, -1); + m->owner_tid = 0; #endif if (lock->exclusive) { - ReleaseSRWLockExclusive((SRWLOCK *)&lock->mutex->handle); - __proflock_after_exclusive_unlock(lock->mutex->profiling_ctx); + ReleaseSRWLockExclusive((SRWLOCK *)&m->srwlock); + 
__proflock_after_exclusive_unlock(m->profiling_ctx); } else { - ReleaseSRWLockShared((SRWLOCK *)&lock->mutex->handle); - __proflock_after_shared_unlock(lock->mutex->profiling_ctx); + ReleaseSRWLockShared((SRWLOCK *)&m->srwlock); + __proflock_after_shared_unlock(m->profiling_ctx); } MEMZERO_STRUCT(lock); } @@ -1648,7 +1686,7 @@ INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void) __prof; struct win32_condition_variable *cv = NULL; { - struct sys_lock lock = sys_mutex_lock_e(&G.condition_variables_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.condition_variables_mutex); if (G.first_free_condition_variable) { cv = G.first_free_condition_variable; G.first_free_condition_variable = cv->next_free; @@ -1667,127 +1705,125 @@ INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void) INTERNAL void win32_condition_variable_release(struct win32_condition_variable *w32cv) { __prof; - struct sys_lock lock = sys_mutex_lock_e(&G.condition_variables_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.condition_variables_mutex); w32cv->next_free = G.first_free_condition_variable; G.first_free_condition_variable = w32cv; sys_mutex_unlock(&lock); } -struct sys_condition_variable sys_condition_variable_alloc(void) +struct sys_condition_variable *sys_condition_variable_alloc(void) { __prof; - struct sys_condition_variable cv = { - .handle = (u64)win32_condition_variable_alloc() - }; - return cv; + return (struct sys_condition_variable *)win32_condition_variable_alloc(); } -void sys_condition_variable_release(struct sys_condition_variable *cv) +void sys_condition_variable_release(struct sys_condition_variable *sys_cv) { __prof; + struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv; /* Condition variable must not have any sleepers (signal before releasing) */ ASSERT(atomic_i64_eval(&cv->num_waiters) == 0); - win32_condition_variable_release((struct win32_condition_variable *)cv->handle); + 
win32_condition_variable_release(cv); } -void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_lock *lock) +void sys_condition_variable_wait(struct sys_condition_variable *sys_cv, struct sys_lock *lock) { __prof; - struct sys_mutex *mutex = lock->mutex; + struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv; + struct win32_mutex *m = (struct win32_mutex *)lock->mutex; b32 exclusive = lock->exclusive; #if RTC atomic_i64_eval_add(&cv->num_waiters, 1); if (exclusive) { - mutex->owner_tid = 0; + m->owner_tid = 0; } - atomic_i64_eval_add(&mutex->count, -1); + atomic_i64_eval_add(&m->count, -1); #endif - struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle; /* TODO: Correct profiling of internal condition variable sleep / wait mutex state */ if (exclusive) { - __proflock_after_exclusive_unlock(mutex->profiling_ctx); + __proflock_after_exclusive_unlock(m->profiling_ctx); } else { - __proflock_after_shared_unlock(mutex->profiling_ctx); + __proflock_after_shared_unlock(m->profiling_ctx); } - SleepConditionVariableSRW(&w32cv->condition_variable, (SRWLOCK *)&mutex->handle, INFINITE, exclusive ? 0 : CONDITION_VARIABLE_LOCKMODE_SHARED); + SleepConditionVariableSRW(&cv->condition_variable, (SRWLOCK *)&m->srwlock, INFINITE, exclusive ? 
0 : CONDITION_VARIABLE_LOCKMODE_SHARED); if (exclusive) { - __proflock_before_exclusive_lock(mutex->profiling_ctx); - __proflock_after_exclusive_lock(mutex->profiling_ctx); + __proflock_before_exclusive_lock(m->profiling_ctx); + __proflock_after_exclusive_lock(m->profiling_ctx); } else { - __proflock_before_shared_lock(mutex->profiling_ctx); - __proflock_after_shared_lock(mutex->profiling_ctx); + __proflock_before_shared_lock(m->profiling_ctx); + __proflock_after_shared_lock(m->profiling_ctx); } #if RTC - atomic_i64_eval_add(&mutex->count, 1); + atomic_i64_eval_add(&m->count, 1); if (exclusive) { - mutex->owner_tid = (u64)GetCurrentThreadId(); + m->owner_tid = (u64)GetCurrentThreadId(); } atomic_i64_eval_add(&cv->num_waiters, -1); #endif } -void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct sys_lock *lock, f64 seconds) +void sys_condition_variable_wait_time(struct sys_condition_variable *sys_cv, struct sys_lock *lock, f64 seconds) { __prof; - struct sys_mutex *mutex = lock->mutex; + struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv; + struct win32_mutex *m = (struct win32_mutex *)lock->mutex; b32 exclusive = lock->exclusive; #if RTC atomic_i64_eval_add(&cv->num_waiters, 1); if (exclusive) { - mutex->owner_tid = 0; + m->owner_tid = 0; } - atomic_i64_eval_add(&mutex->count, -1); + atomic_i64_eval_add(&m->count, -1); #endif - struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle; u32 ms = (u32)math_round_to_int((f32)seconds * 1000.f); /* TODO: Correct profiling of internal condition variable sleep / wait mutex state */ if (exclusive) { - __proflock_after_exclusive_unlock(mutex->profiling_ctx); + __proflock_after_exclusive_unlock(m->profiling_ctx); } else { - __proflock_after_shared_unlock(mutex->profiling_ctx); + __proflock_after_shared_unlock(m->profiling_ctx); } - SleepConditionVariableSRW(&w32cv->condition_variable, (SRWLOCK *)&mutex->handle, ms, exclusive ? 
0 : CONDITION_VARIABLE_LOCKMODE_SHARED); + SleepConditionVariableSRW(&cv->condition_variable, (SRWLOCK *)&m->srwlock, ms, exclusive ? 0 : CONDITION_VARIABLE_LOCKMODE_SHARED); if (exclusive) { - __proflock_before_exclusive_lock(mutex->profiling_ctx); - __proflock_after_exclusive_lock(mutex->profiling_ctx); + __proflock_before_exclusive_lock(m->profiling_ctx); + __proflock_after_exclusive_lock(m->profiling_ctx); } else { - __proflock_before_shared_lock(mutex->profiling_ctx); - __proflock_after_shared_lock(mutex->profiling_ctx); + __proflock_before_shared_lock(m->profiling_ctx); + __proflock_after_shared_lock(m->profiling_ctx); } #if RTC - atomic_i64_eval_add(&mutex->count, 1); + atomic_i64_eval_add(&m->count, 1); if (exclusive) { - mutex->owner_tid = (u64)GetCurrentThreadId(); + m->owner_tid = (u64)GetCurrentThreadId(); } atomic_i64_eval_add(&cv->num_waiters, -1); #endif } -void sys_condition_variable_signal(struct sys_condition_variable *cv, u32 count) +void sys_condition_variable_signal(struct sys_condition_variable *sys_cv, u32 count) { __prof; - struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle; + struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv; /* Windows will wake all waiters if many single-wakes occur anyway, so we * might as well wake all ourselves. 
* https://devblogs.microsoft.com/oldnewthing/20180201-00/?p=97946 */ if (count <= 24) { for (u32 i = 0; i < count; ++i) { - WakeConditionVariable(&w32cv->condition_variable); + WakeConditionVariable(&cv->condition_variable); } } else { - WakeAllConditionVariable(&w32cv->condition_variable); + WakeAllConditionVariable(&cv->condition_variable); } } -void sys_condition_variable_broadcast(struct sys_condition_variable *cv) +void sys_condition_variable_broadcast(struct sys_condition_variable *sys_cv) { __prof; - struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle; - WakeAllConditionVariable(&w32cv->condition_variable); + struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv; + WakeAllConditionVariable(&cv->condition_variable); } /* ========================== * @@ -1836,7 +1872,7 @@ struct thread_local_store *sys_thread_get_thread_local_store(void) INTERNAL struct win32_thread *win32_thread_alloc(void) { struct win32_thread *t = NULL; - struct sys_lock lock = sys_mutex_lock_e(&G.threads_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.threads_mutex); { if (G.threads_first_free) { t = G.threads_first_free; @@ -1859,7 +1895,7 @@ INTERNAL struct win32_thread *win32_thread_alloc(void) INTERNAL void win32_thread_release(struct win32_thread *t) { - struct sys_lock lock = sys_mutex_lock_e(&G.threads_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.threads_mutex); { if (t->prev) { t->prev->next = t->next; @@ -2324,6 +2360,12 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, /* Set up timing period */ timeBeginPeriod(G.scheduler_period_ms); + /* Set up mutexes */ + struct win32_mutex first_mutex = ZI; + win32_mutex_init(&first_mutex); + G.mutexes_mutex = (struct sys_mutex *)&first_mutex; + G.mutexes_arena = arena_alloc(GIGABYTE(64)); + /* Set up condition variables */ G.condition_variables_mutex = sys_mutex_alloc(); G.condition_variables_arena = arena_alloc(GIGABYTE(64)); @@
-2405,7 +2447,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, /* Get app thread handle */ HANDLE app_thread_handle = 0; - struct sys_lock lock = sys_mutex_lock_s(&G.threads_mutex); + struct sys_lock lock = sys_mutex_lock_s(G.threads_mutex); { struct win32_thread *wt = (struct win32_thread *)app_thread; app_thread_handle = wt->handle; @@ -2429,7 +2471,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, /* Find any dangling threads that haven't exited gracefully by now */ if (!atomic_i32_eval(&G.panicking)) { - struct sys_lock lock = sys_mutex_lock_s(&G.threads_mutex); + struct sys_lock lock = sys_mutex_lock_s(G.threads_mutex); if (G.threads_first) { struct arena_temp scratch = scratch_begin_no_conflict(); u64 num_dangling_threads = 0; diff --git a/src/user.c b/src/user.c index 90a91581..77694029 100644 --- a/src/user.c +++ b/src/user.c @@ -88,7 +88,7 @@ GLOBAL struct { b32 debug_draw; /* Debug console */ - struct sys_mutex console_logs_mutex; + struct sys_mutex *console_logs_mutex; struct arena *console_logs_arena; struct console_log *first_console_log; struct console_log *last_console_log; @@ -97,11 +97,11 @@ GLOBAL struct { b32 debug_console; /* Window -> user */ - struct sys_mutex sys_events_mutex; + struct sys_mutex *sys_events_mutex; struct arena *sys_events_arena; /* User -> local sim */ - struct sys_mutex user_sim_cmd_mutex; + struct sys_mutex *user_sim_cmd_mutex; struct sim_control user_sim_cmd_control; struct sim_ent_id user_hovered_ent; u64 last_user_sim_cmd_gen; @@ -111,7 +111,7 @@ GLOBAL struct { struct atomic_i32 user_paused_steps; /* Local sim -> user */ - struct sys_mutex local_to_user_client_mutex; + struct sys_mutex *local_to_user_client_mutex; struct sim_client_store *local_to_user_client_store; struct sim_client *local_to_user_client; i64 local_to_user_client_publish_dt_ns; @@ -302,7 +302,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(user_shutdown) INTERNAL struct sys_event_array 
pop_sys_events(struct arena *arena) { struct sys_event_array array = ZI; - struct sys_lock lock = sys_mutex_lock_e(&G.sys_events_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.sys_events_mutex); { struct sys_event *src_events = (struct sys_event *)arena_base(G.sys_events_arena); array.count = G.sys_events_arena->pos / sizeof(*src_events); @@ -316,7 +316,7 @@ INTERNAL struct sys_event_array pop_sys_events(struct arena *arena) INTERNAL SYS_WINDOW_EVENT_CALLBACK_FUNC_DEF(window_event_callback, event) { - struct sys_lock lock = sys_mutex_lock_e(&G.sys_events_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.sys_events_mutex); { *arena_push_no_zero(G.sys_events_arena, struct sys_event) = event; } @@ -462,7 +462,7 @@ INTERNAL struct string get_ent_debug_text(struct arena *arena, struct sim_ent *e INTERNAL LOG_EVENT_CALLBACK_FUNC_DEF(debug_console_log_callback, log) { - struct sys_lock lock = sys_mutex_lock_e(&G.console_logs_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.console_logs_mutex); { struct console_log *clog = arena_push(G.console_logs_arena, struct console_log); clog->level = log.level; @@ -522,7 +522,7 @@ INTERNAL void draw_debug_console(i32 level, b32 minimized) i64 now_ns = sys_time_ns(); struct font *font = font_load_async(LIT("font/fixedsys.ttf"), 12.0f); if (font) { - struct sys_lock lock = sys_mutex_lock_e(&G.console_logs_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.console_logs_mutex); { for (struct console_log *log = G.last_console_log; log; log = log->prev) { f32 opacity = 0.75; @@ -640,7 +640,7 @@ INTERNAL void user_update(void) * ========================== */ { - struct sys_lock lock = sys_mutex_lock_e(&G.local_to_user_client_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.local_to_user_client_mutex); u64 old_last_tick = G.user_unblended_client->last_tick; u64 last_tick = G.local_to_user_client->last_tick; if (last_tick > old_last_tick) { @@ -1870,7 +1870,7 @@ INTERNAL void user_update(void) /* Set user sim control */ { - 
struct sys_lock lock = sys_mutex_lock_e(&G.user_sim_cmd_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.user_sim_cmd_mutex); /* Reset flags */ if (G.user_sim_cmd_gen != G.last_user_sim_cmd_gen) { @@ -2152,7 +2152,7 @@ INTERNAL void generate_user_input_cmds(struct sim_client *user_input_client, u64 sim_ent_activate(control_cmd, user_input_ss->tick); } { - struct sys_lock lock = sys_mutex_lock_e(&G.user_sim_cmd_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.user_sim_cmd_mutex); /* Update control cmd */ { control_cmd->cmd_control = G.user_sim_cmd_control; @@ -2767,7 +2767,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(user_local_sim_thread_entry_point, arg) struct sim_snapshot *local_ss = sim_snapshot_from_tick(local_client, local_client->last_tick); if (local_ss->valid) { /* TODO: Double buffer */ - struct sys_lock lock = sys_mutex_lock_e(&G.local_to_user_client_mutex); + struct sys_lock lock = sys_mutex_lock_e(G.local_to_user_client_mutex); sim_snapshot_alloc(G.local_to_user_client, local_ss, local_ss->tick); i64 publish_ns = sys_time_ns(); G.local_to_user_client_publish_dt_ns = publish_ns - last_publish_to_user_ns; diff --git a/src/util.h b/src/util.h index 628f65c8..cc6356ee 100644 --- a/src/util.h +++ b/src/util.h @@ -262,8 +262,8 @@ INLINE void dict_remove_entry(struct dict *dict, struct dict_entry *entry) * ========================== */ struct sync_flag { - struct sys_mutex mutex; - struct sys_condition_variable cv; + struct sys_mutex *mutex; + struct sys_condition_variable *cv; b32 flag; }; @@ -277,25 +277,25 @@ INLINE struct sync_flag sync_flag_alloc(void) INLINE void sync_flag_release(struct sync_flag *sf) { - sys_mutex_release(&sf->mutex); - sys_condition_variable_release(&sf->cv); + sys_mutex_release(sf->mutex); + sys_condition_variable_release(sf->cv); } INLINE void sync_flag_set(struct sync_flag *sf) { __prof; - struct sys_lock lock = sys_mutex_lock_e(&sf->mutex); + struct sys_lock lock = sys_mutex_lock_e(sf->mutex); sf->flag = 1; - 
sys_condition_variable_broadcast(&sf->cv); + sys_condition_variable_broadcast(sf->cv); sys_mutex_unlock(&lock); } INLINE void sync_flag_wait(struct sync_flag *sf) { __prof; - struct sys_lock lock = sys_mutex_lock_s(&sf->mutex); + struct sys_lock lock = sys_mutex_lock_s(sf->mutex); while (sf->flag != 1) { - sys_condition_variable_wait(&sf->cv, &lock); + sys_condition_variable_wait(sf->cv, &lock); } sys_mutex_unlock(&lock); } diff --git a/src/work.c b/src/work.c index ef7d2f17..079774ff 100644 --- a/src/work.c +++ b/src/work.c @@ -36,7 +36,7 @@ struct work { enum work_status status; u32 workers; - struct sys_condition_variable condition_variable_finished; + struct sys_condition_variable *condition_variable_finished; struct work *prev_scheduled; struct work *next_scheduled; @@ -65,8 +65,8 @@ GLOBAL struct { struct arena *arena; b32 workers_shutdown; - struct sys_mutex mutex; - struct sys_condition_variable cv; + struct sys_mutex *mutex; + struct sys_condition_variable *cv; u32 worker_count; u32 idle_worker_count; @@ -117,7 +117,7 @@ struct work_startup_receipt work_startup(u32 num_worker_threads) app_register_exit_callback(&work_shutdown); /* Initialize threads */ - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { struct worker *prev = NULL; for (u32 i = 0; i < num_worker_threads; ++i) { @@ -146,10 +146,10 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(work_shutdown) { __prof; - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { G.workers_shutdown = true; - sys_condition_variable_broadcast(&G.cv); + sys_condition_variable_broadcast(G.cv); } sys_mutex_unlock(&lock); @@ -165,7 +165,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(work_shutdown) INTERNAL struct work *work_alloc_locked(struct sys_lock *lock) { __prof; - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; struct work *work = NULL; @@ -192,7 +192,7 @@ INTERNAL struct work 
*work_alloc_locked(struct sys_lock *lock) INTERNAL void work_release_locked(struct sys_lock *lock, struct work *work) { - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; work->next_free = G.free_work_head; @@ -202,7 +202,7 @@ INTERNAL void work_release_locked(struct sys_lock *lock, struct work *work) INTERNAL struct work_handle work_to_handle_locked(struct sys_lock *lock, struct work *work) { - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; return (struct work_handle) { @@ -213,7 +213,7 @@ INTERNAL struct work_handle work_to_handle_locked(struct sys_lock *lock, struct INTERNAL struct work_task *task_alloc_locked(struct sys_lock *lock) { - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; struct work_task *task = NULL; @@ -234,7 +234,7 @@ INTERNAL struct work_task *task_alloc_locked(struct sys_lock *lock) INTERNAL void task_release_locked(struct sys_lock *lock, struct work_task *task) { - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; task->next_free = G.free_task_head; @@ -248,7 +248,7 @@ INTERNAL void task_release_locked(struct sys_lock *lock, struct work_task *task) INTERNAL void work_schedule_locked(struct sys_lock *lock, struct work *work) { __prof; - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; enum work_priority priority = work->priority; @@ -281,13 +281,13 @@ INTERNAL void work_schedule_locked(struct sys_lock *lock, struct work *work) G.scheduled_work_priority_tails[priority] = work; - sys_condition_variable_signal(&G.cv, work->tasks_incomplete); + sys_condition_variable_signal(G.cv, work->tasks_incomplete); } INTERNAL void work_unschedule_locked(struct sys_lock *lock, struct work *work) { __prof; - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; struct work *prev = (struct work *)work->prev_scheduled; @@ -319,7 
+319,7 @@ INTERNAL void work_unschedule_locked(struct sys_lock *lock, struct work *work) INTERNAL struct work_task *work_dequeue_task_locked(struct sys_lock *lock, struct work *work) { __prof; - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); struct work_task *task = work->task_head; if (task) { @@ -341,7 +341,7 @@ INTERNAL struct work_task *work_dequeue_task_locked(struct sys_lock *lock, struc INTERNAL b32 work_exec_single_task_maybe_release_locked(struct sys_lock *lock, struct work *work) { __prof; - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); struct work_task *task = work_dequeue_task_locked(lock, work); b32 more_tasks = work->task_head != NULL; @@ -354,7 +354,7 @@ INTERNAL b32 work_exec_single_task_maybe_release_locked(struct sys_lock *lock, s { sys_mutex_unlock(lock); task->func(task->data); - *lock = sys_mutex_lock_e(&G.mutex); + *lock = sys_mutex_lock_e(G.mutex); } --work->workers; --work->tasks_incomplete; @@ -363,7 +363,7 @@ INTERNAL b32 work_exec_single_task_maybe_release_locked(struct sys_lock *lock, s if (work->tasks_incomplete == 0) { /* Signal finished */ work->status = WORK_STATUS_DONE; - sys_condition_variable_broadcast(&work->condition_variable_finished); + sys_condition_variable_broadcast(work->condition_variable_finished); /* Release */ work_release_locked(lock, work); @@ -376,7 +376,7 @@ INTERNAL b32 work_exec_single_task_maybe_release_locked(struct sys_lock *lock, s INTERNAL void work_exec_remaining_tasks_maybe_release_locked(struct sys_lock *lock, struct work *work) { __prof; - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); b32 more_tasks = true; while (more_tasks) { @@ -397,7 +397,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(worker_thread_entry_point, thread_data) .is_worker = true }; - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { while (!G.workers_shutdown) { struct work *work = 
G.scheduled_work_head; @@ -407,7 +407,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(worker_thread_entry_point, thread_data) work_exec_single_task_maybe_release_locked(&lock, work); ++G.idle_worker_count; } else { - sys_condition_variable_wait(&G.cv, &lock); + sys_condition_variable_wait(G.cv, &lock); } } } @@ -422,7 +422,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(worker_thread_entry_point, thread_data) INTERNAL struct work_handle work_push_from_slate_locked(struct sys_lock *lock, struct work_slate *ws, b32 help, enum work_priority priority) { __prof; - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); struct work *work = work_alloc_locked(lock); struct work_handle wh = work_to_handle_locked(lock, work); @@ -467,7 +467,7 @@ INTERNAL struct work_handle work_push_from_slate_locked(struct sys_lock *lock, s INTERNAL struct work_handle work_push_task_internal(work_task_func *func, void *data, b32 help, enum work_priority priority) { struct work_handle handle; - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { struct work_task *task = task_alloc_locked(&lock); task->data = data; @@ -511,7 +511,7 @@ void work_slate_push_task(struct work_slate *ws, work_task_func *func, void *dat __prof; struct work_task *task = NULL; - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { task = task_alloc_locked(&lock); } @@ -536,7 +536,7 @@ struct work_handle work_slate_end(struct work_slate *ws, enum work_priority prio __prof; struct work_handle handle; - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { handle = work_push_from_slate_locked(&lock, ws, false, priority); } @@ -549,7 +549,7 @@ struct work_handle work_slate_end_and_help(struct work_slate *ws, enum work_prio { __prof; - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); struct 
work_handle handle = work_push_from_slate_locked(&lock, ws, true, priority); sys_mutex_unlock(&lock); @@ -562,7 +562,7 @@ struct work_handle work_slate_end_and_help(struct work_slate *ws, enum work_prio INTERNAL struct work *work_from_handle_locked(struct sys_lock *lock, struct work_handle handle) { - sys_assert_locked_e(lock, &G.mutex); + sys_assert_locked_e(lock, G.mutex); (UNUSED)lock; struct work *work = handle.work; @@ -577,7 +577,7 @@ INTERNAL struct work *work_from_handle_locked(struct sys_lock *lock, struct work void work_wait(struct work_handle handle) { __prof; - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { struct work *work = work_from_handle_locked(&lock, handle); if (work) { @@ -588,7 +588,7 @@ void work_wait(struct work_handle handle) work = work_from_handle_locked(&lock, handle); /* Re-checking work is sitll valid here in case work_exec caused work to release */ if (work) { while (work->status != WORK_STATUS_DONE) { - sys_condition_variable_wait(&work->condition_variable_finished, &lock); + sys_condition_variable_wait(work->condition_variable_finished, &lock); } } } @@ -600,7 +600,7 @@ void work_wait(struct work_handle handle) void work_help(struct work_handle handle) { __prof; - struct sys_lock lock = sys_mutex_lock_e(&G.mutex); + struct sys_lock lock = sys_mutex_lock_e(G.mutex); { struct work *work = work_from_handle_locked(&lock, handle); if (work) {