Refactor mutexes to use 'sys_lock' objects; make all mutexes RW mutexes.

This commit is contained in:
jacob 2024-06-25 19:54:33 -05:00
parent 3061d465d1
commit 04db1226e2
15 changed files with 526 additions and 617 deletions

View File

@ -108,14 +108,12 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(exit_callback_thread_entry_point, vcall
void app_register_exit_callback(app_exit_callback_func *func)
{
sys_mutex_lock(&G.exit_callbacks_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.exit_callbacks_mutex);
struct exit_callback *callback = arena_push_zero(&G.exit_callbacks_arena, struct exit_callback);
callback->func = func;
callback->next = G.exit_callbacks_head;
G.exit_callbacks_head = callback;
}
sys_mutex_unlock(&G.exit_callbacks_mutex);
sys_mutex_unlock(&lock);
}
/* ========================== *
@ -232,18 +230,21 @@ void app_entry_point(void)
/* FIXME: Only wait on threads for a certain period of time before
* forcing process exit (to prevent process hanging in the background
* when a thread gets stuck) */
sys_mutex_lock(&G.exit_callbacks_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.exit_callbacks_mutex);
/* Start callback threads */
for (struct exit_callback *callback = G.exit_callbacks_head; callback; callback = callback->next) {
callback->thread = sys_thread_alloc(&exit_callback_thread_entry_point, callback, STR("[P4] Exit callback thread"));
}
/* Wait on callback threads */
for (struct exit_callback *callback = G.exit_callbacks_head; callback; callback = callback->next) {
sys_thread_wait_release(&callback->thread);
}
sys_mutex_unlock(&lock);
}
sys_mutex_unlock(&G.exit_callbacks_mutex);
/* Write window settings to file */
{

View File

@ -16,11 +16,11 @@
#define ASSET_LOOKUP_TABLE_CAPACITY (MAX_ASSETS * 4)
GLOBAL struct {
struct sys_rw_mutex lookup_rw_mutex;
struct sys_mutex lookup_mutex;
struct asset lookup[ASSET_LOOKUP_TABLE_CAPACITY];
u64 num_assets;
struct sys_rw_mutex store_rw_mutex;
struct sys_mutex store_mutex;
struct arena store_arena;
#if RTC
@ -40,9 +40,9 @@ struct asset_cache_startup_receipt asset_cache_startup(struct work_startup_recei
(UNUSED)work_sr;
/* Init lookup */
G.lookup_rw_mutex = sys_rw_mutex_alloc();
G.lookup_mutex = sys_mutex_alloc();
/* Init store */
G.store_rw_mutex = sys_rw_mutex_alloc();
G.store_mutex = sys_mutex_alloc();
G.store_arena = arena_alloc(GIGABYTE(64));
#if RTC
/* Init debug */
@ -59,8 +59,7 @@ struct asset_cache_startup_receipt asset_cache_startup(struct work_startup_recei
INTERNAL void refresh_dbg_table(void)
{
#if RTC
sys_mutex_lock(&G.dbg_table_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.dbg_table_mutex);
MEMZERO_ARRAY(G.dbg_table);
G.dbg_table_count = 0;
for (u64 i = 0; i < ARRAY_COUNT(G.lookup); ++i) {
@ -69,15 +68,16 @@ INTERNAL void refresh_dbg_table(void)
G.dbg_table[G.dbg_table_count++] = asset;
}
}
}
sys_mutex_unlock(&G.dbg_table_mutex);
sys_mutex_unlock(&lock);
#endif
}
/* Returns first matching slot or first empty slot if not found.
* Check returned slot->hash != 0 for presence. */
INTERNAL struct asset *asset_cache_get_slot_assume_locked(struct string key, u64 hash)
INTERNAL struct asset *asset_cache_get_slot_assume_locked(struct sys_lock *lock, struct string key, u64 hash)
{
sys_assert_locked_s(lock, &G.lookup_mutex);
u64 index = hash % ARRAY_COUNT(G.lookup);
while (true) {
struct asset *slot = &G.lookup[index];
@ -122,17 +122,17 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch
/* Lookup */
{
sys_rw_mutex_lock_shared(&G.lookup_rw_mutex);
asset = asset_cache_get_slot_assume_locked(key, hash);
sys_rw_mutex_unlock_shared(&G.lookup_rw_mutex);
struct sys_lock lock = sys_mutex_lock_s(&G.lookup_mutex);
asset = asset_cache_get_slot_assume_locked(&lock, key, hash);
sys_mutex_unlock(&lock);
}
/* Insert if not found */
if (!asset->hash) {
sys_rw_mutex_lock_exclusive(&G.lookup_rw_mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.lookup_mutex);
/* Re-check asset presence in case it was inserted since lock */
asset = asset_cache_get_slot_assume_locked(key, hash);
asset = asset_cache_get_slot_assume_locked(&lock, key, hash);
if (!asset->hash) {
if (G.num_assets >= MAX_ASSETS) {
@ -162,7 +162,7 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch
refresh_dbg_table();
}
sys_rw_mutex_unlock_exclusive(&G.lookup_rw_mutex);
sys_mutex_unlock(&lock);
}
return asset;
@ -231,15 +231,15 @@ void *asset_cache_get_store_data(struct asset *asset)
/* Asset store should be opened to allocate memory to the store arena */
struct asset_cache_store asset_cache_store_open(void)
{
struct sys_lock lock = sys_mutex_lock_e(&G.store_mutex);
struct asset_cache_store store = {
.rw_mutex = &G.store_rw_mutex,
.lock = lock,
.arena = &G.store_arena
};
sys_rw_mutex_lock_exclusive(store.rw_mutex);
return store;
}
void asset_cache_store_close(struct asset_cache_store *store)
{
sys_rw_mutex_unlock_exclusive(store->rw_mutex);
sys_mutex_unlock(&store->lock);
}

View File

@ -37,7 +37,7 @@ struct asset_cache_store {
struct arena *arena;
/* Internal */
struct sys_rw_mutex *rw_mutex;
struct sys_lock lock;
};
struct asset_cache_startup_receipt { i32 _; };

View File

@ -66,27 +66,25 @@ struct font_startup_receipt font_startup(struct work_startup_receipt *work_sr,
INTERNAL struct font_task_params *font_task_params_alloc(void)
{
struct font_task_params *p = NULL;
sys_mutex_lock(&G.params.mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.params.mutex);
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push_zero(&G.params.arena, struct font_task_params);
}
sys_mutex_unlock(&lock);
}
sys_mutex_unlock(&G.params.mutex);
return p;
}
INTERNAL void font_task_params_release(struct font_task_params *p)
{
sys_mutex_lock(&G.params.mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.params.mutex);
p->next_free = G.params.head_free;
G.params.head_free = p;
}
sys_mutex_unlock(&G.params.mutex);
sys_mutex_unlock(&lock);
}
/* ========================== *

View File

@ -73,28 +73,24 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(game_shutdown)
INTERNAL void push_cmds(struct game_cmd_array cmd_array)
{
sys_mutex_lock(&G.game_cmds_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.game_cmds_mutex);
struct game_cmd *cmds = arena_push_array(&G.game_cmds_arena, struct game_cmd, cmd_array.count);
MEMCPY(cmds, cmd_array.cmds, cmd_array.count * sizeof(*cmds));
}
sys_mutex_unlock(&G.game_cmds_mutex);
sys_mutex_unlock(&lock);
}
INTERNAL struct game_cmd_array pop_cmds(struct arena *arena)
{
struct game_cmd_array array = { 0 };
if (G.game_cmds_arena.pos > 0) {
sys_mutex_lock(&G.game_cmds_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.game_cmds_mutex);
struct buffer game_cmds_buff = arena_to_buffer(&G.game_cmds_arena);
arena_align(arena, alignof(struct game_cmd));
array.cmds = (struct game_cmd *)arena_push_array(arena, u8, game_cmds_buff.size);
array.count = game_cmds_buff.size / sizeof(struct game_cmd);
MEMCPY(array.cmds, game_cmds_buff.data, game_cmds_buff.size);
arena_reset(&G.game_cmds_arena);
}
sys_mutex_unlock(&G.game_cmds_mutex);
sys_mutex_unlock(&lock);
}
return array;
}
@ -106,12 +102,10 @@ INTERNAL struct game_cmd_array pop_cmds(struct arena *arena)
INTERNAL void publish_game_tick(void)
{
__prof;
sys_mutex_lock(&G.published_tick_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.published_tick_mutex);
world_copy_replace(&G.published_tick, &G.world);
atomic_u64_eval_exchange(&G.published_tick_id, G.published_tick.tick_id);
}
sys_mutex_unlock(&G.published_tick_mutex);
sys_mutex_unlock(&lock);
}
INTERNAL void recalculate_world_xform_recurse(struct entity *parent)
@ -532,11 +526,9 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(game_thread_entry_point, arg)
void game_get_latest_tick(struct world *dest)
{
sys_mutex_lock(&G.published_tick_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.published_tick_mutex);
world_copy_replace(dest, &G.published_tick);
}
sys_mutex_unlock(&G.published_tick_mutex);
sys_mutex_unlock(&lock);
}
u64 game_get_latest_tick_id(void)

View File

@ -75,14 +75,14 @@ struct log_startup_receipt log_startup(struct string logfile_path)
* ========================== */
void log_register_callback(log_event_callback_func *func)
{
if (!atomic_i32_eval(&G.initialized)) { return; }
sys_mutex_lock(&G.mutex);
{
/* TODO */
(UNUSED)func;
}
sys_mutex_unlock(&G.mutex);
#if 0
if (!atomic_i32_eval(&G.initialized)) { return; }
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
sys_mutex_unlock(&lock);
#endif
}
/* ========================== *

View File

@ -102,9 +102,9 @@ INTERNAL struct track *track_from_handle(struct mixer_track_handle handle)
}
}
INTERNAL struct track *track_alloc_assume_locked(struct sound *sound)
INTERNAL struct track *track_alloc_assume_locked(struct sys_lock *lock, struct sound *sound)
{
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
struct track *track = NULL;
if (G.track_first_free) {
@ -140,9 +140,9 @@ INTERNAL struct track *track_alloc_assume_locked(struct sound *sound)
return track;
}
INTERNAL void track_release_assume_locked(struct track *track)
INTERNAL void track_release_assume_locked(struct sys_lock *lock, struct track *track)
{
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
/* Remove from playing list */
struct track *prev = track->prev;
@ -187,12 +187,12 @@ struct mixer_track_handle mixer_play_ex(struct sound *sound, struct mixer_desc d
{
struct track *track;
{
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
track = track_alloc_assume_locked(sound);
track = track_alloc_assume_locked(&lock, sound);
track->desc = desc;
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
}
return track_to_handle(track);
}
@ -205,7 +205,7 @@ struct mixer_desc mixer_track_get(struct mixer_track_handle handle)
struct track *track = track_from_handle(handle);
if (track) {
/* TODO: Only lock mutex on track itself or something */
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
@ -213,7 +213,7 @@ struct mixer_desc mixer_track_get(struct mixer_track_handle handle)
res = track->desc;
}
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
}
return res;
@ -225,7 +225,7 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc)
struct track *track = track_from_handle(handle);
if (track) {
/* TODO: Only lock mutex on track itself or something */
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
@ -233,18 +233,18 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc)
track->desc = desc;
}
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
}
}
void mixer_set_listener(struct v2 pos, struct v2 dir)
{
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
G.listener_pos = pos;
G.listener_dir = v2_norm(dir);
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
}
/* ========================== *
@ -279,8 +279,9 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
/* Create temp array of mixes */
struct mix **mixes = NULL;
u64 mixes_count = 0;
sys_mutex_lock(&G.mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
/* Read listener info */
listener_pos = G.listener_pos;
listener_dir = G.listener_dir;
@ -293,8 +294,9 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
mix->desc = track->desc;
mixes[mixes_count++] = mix;
}
sys_mutex_unlock(&lock);
}
sys_mutex_unlock(&G.mutex);
for (u64 mix_index = 0; mix_index < mixes_count; ++mix_index) {
__profscope(mix_track);
@ -463,21 +465,21 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
}
}
sys_mutex_lock(&G.mutex);
{
__profscope(update_track_effect_data);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
for (u64 i = 0; i < mixes_count; ++i) {
struct mix *mix = mixes[i];
struct track *track = track_from_handle(mix->track_handle);
if (track) {
if (mix->track_finished) {
/* Release finished tracks */
track_release_assume_locked(track);
track_release_assume_locked(&lock, track);
}
}
}
sys_mutex_unlock(&lock);
}
sys_mutex_unlock(&G.mutex);
scratch_end(scratch);
return res;

View File

@ -198,7 +198,7 @@ INTERNAL struct renderer_handle handle_alloc(void *data)
struct handle_store *store = &G.handle_store;
struct handle_slot *slot = NULL;
sys_mutex_lock(&store->mutex);
struct sys_lock lock = sys_mutex_lock_e(&store->mutex);
{
if (store->head_free) {
/* Take first from free list */
@ -217,7 +217,7 @@ INTERNAL struct renderer_handle handle_alloc(void *data)
}
slot->data = data;
}
sys_mutex_unlock(&store->mutex);
sys_mutex_unlock(&lock);
struct renderer_handle handle = HANDLE_CREATE(slot->idx, slot->gen);
return handle;
@ -231,7 +231,7 @@ INTERNAL void handle_release(struct renderer_handle handle)
u32 idx = HANDLE_IDX(handle);
u32 gen = HANDLE_GEN(handle);
sys_mutex_lock(&store->mutex);
struct sys_lock lock = sys_mutex_lock_e(&store->mutex);
{
if (idx < store->count) {
struct handle_slot *slot = &store->array[idx];
@ -256,7 +256,7 @@ INTERNAL void handle_release(struct renderer_handle handle)
ASSERT(false);
}
}
sys_mutex_unlock(&store->mutex);
sys_mutex_unlock(&lock);
}
INTERNAL void *handle_data(struct renderer_handle handle)

View File

@ -57,28 +57,24 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void)
{
struct sound_task_params *p = NULL;
{
sys_mutex_lock(&G.params.mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.params.mutex);
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push_zero(&G.params.arena, struct sound_task_params);
}
}
sys_mutex_unlock(&G.params.mutex);
sys_mutex_unlock(&lock);
}
return p;
}
INTERNAL void sound_task_params_release(struct sound_task_params *p)
{
sys_mutex_lock(&G.params.mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.params.mutex);
p->next_free = G.params.head_free;
G.params.head_free = p;
}
sys_mutex_unlock(&G.params.mutex);
sys_mutex_unlock(&lock);
}
/* ========================== *

View File

@ -95,7 +95,7 @@ struct cache_node {
};
struct cache_bucket {
struct sys_rw_mutex rw_mutex;
struct sys_mutex mutex;
struct cache_node *first;
};
@ -242,7 +242,7 @@ struct sprite_startup_receipt sprite_startup(struct renderer_startup_receipt *re
G.cache.arena = arena_alloc(GIGABYTE(64));
G.cache.buckets = arena_push_array_zero(&G.cache.arena, struct cache_bucket, CACHE_BUCKETS_COUNT);
for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
G.cache.buckets[i].rw_mutex = sys_rw_mutex_alloc();
G.cache.buckets[i].mutex = sys_mutex_alloc();
}
G.load_cmds_arena = arena_alloc(GIGABYTE(64));
@ -261,14 +261,13 @@ struct sprite_startup_receipt sprite_startup(struct renderer_startup_receipt *re
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sprite_shutdown)
{
__prof;
/* Signal evictor shutdown */
sys_mutex_lock(&G.evictor_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.evictor_mutex);
G.evictor_shutdown = true;
sys_condition_variable_broadcast(&G.evictor_cv);
sys_mutex_unlock(&lock);
}
sys_mutex_unlock(&G.evictor_mutex);
sys_thread_wait_release(&G.evictor_thread);
}
@ -573,8 +572,8 @@ INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct
/* Lookup */
/* TODO: Spinlock */
sys_rw_mutex_lock_shared(&bucket->rw_mutex);
{
struct sys_lock lock = sys_mutex_lock_s(&bucket->mutex);
nonmatching_next = &bucket->first;
n = *nonmatching_next;
while (n) {
@ -587,17 +586,17 @@ INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct
n = *nonmatching_next;
}
}
sys_mutex_unlock(&lock);
}
sys_rw_mutex_unlock_shared(&bucket->rw_mutex);
/* Allocate new node if necessary */
if (!n) {
__profscope(node_lookup_allocate);
sys_rw_mutex_lock_exclusive(&bucket->rw_mutex);
struct sys_lock bucket_lock = sys_mutex_lock_e(&bucket->mutex);
{
/* Alloc node */
sys_mutex_lock(&G.cache.node_pool_mutex);
{
struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.node_pool_mutex);
if (G.cache.node_pool_first_free) {
n = G.cache.node_pool_first_free;
G.cache.node_pool_first_free = n->next_free;
@ -605,8 +604,8 @@ INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct
} else {
n = arena_push_zero(&G.cache.arena, struct cache_node);
}
sys_mutex_unlock(&pool_lock);
}
sys_mutex_unlock(&G.cache.node_pool_mutex);
/* Init node and add to bucket */
scope_ensure_reference(scope, n, cache_bucket_index);
*nonmatching_next = n;
@ -619,7 +618,7 @@ INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct
n->texture = G.nil_texture;
n->sheet = G.nil_sheet;
}
sys_rw_mutex_unlock_exclusive(&bucket->rw_mutex);
sys_mutex_unlock(&bucket_lock);
}
return n;
@ -657,8 +656,8 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
} break;
}
} else {
sys_mutex_lock(&G.load_cmds_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
/* Allocate cmd */
struct load_cmd *cmd = NULL;
if (G.first_free_load_cmd) {
@ -683,8 +682,8 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
/* Push work */
work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL);
}
sys_mutex_unlock(&G.load_cmds_mutex);
sys_mutex_unlock(&lock);
}
}
@ -774,12 +773,12 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
/* Free cmd */
node_refcount_add(n, -1);
sys_mutex_lock(&G.load_cmds_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
cmd->next_free = G.first_free_load_cmd;
G.first_free_load_cmd = cmd;
sys_mutex_unlock(&lock);
}
sys_mutex_unlock(&G.load_cmds_mutex);
}
/* ========================== *
@ -801,8 +800,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
{
(UNUSED)arg;
sys_mutex_lock(&G.evictor_mutex);
{
struct sys_lock evictor_lock = sys_mutex_lock_e(&G.evictor_mutex);
while (!G.evictor_shutdown) {
struct temp_arena scratch = scratch_begin_no_conflict();
struct evict_node *head_consider = NULL;
@ -817,7 +815,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
__profscope(eviction_scan);
for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
struct cache_bucket *bucket = &G.cache.buckets[i];
sys_rw_mutex_lock_shared(&bucket->rw_mutex);
struct sys_lock bucket_lock = sys_mutex_lock_s(&bucket->mutex);
{
struct cache_node *n = bucket->first;
while (n) {
@ -883,8 +881,8 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
n = n->next_hash;
}
sys_rw_mutex_unlock_shared(&bucket->rw_mutex);
}
sys_mutex_unlock(&bucket_lock);
}
}
@ -916,7 +914,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
for (struct evict_node *en = head_consider_lru; en && !stop_evicting; en = en->next_consider_lru) {
struct cache_bucket *bucket = en->cache_bucket;
struct cache_node *n = en->cache_node;
sys_rw_mutex_lock_exclusive(&bucket->rw_mutex);
struct sys_lock bucket_lock = sys_mutex_lock_e(&bucket->mutex);
{
struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct);
if (refcount.count > 0 || ((refcount.last_modified_cycle != en->refcount.last_modified_cycle) && !en->force_evict)) {
@ -940,7 +938,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
stop_evicting = true;
}
}
sys_rw_mutex_unlock_exclusive(&bucket->rw_mutex);
sys_mutex_unlock(&bucket_lock);
}
}
@ -958,24 +956,23 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
}
/* Add evicted nodes to free list */
sys_mutex_lock(&G.cache.node_pool_mutex);
{
__profscope(eviction_free_list_append);
struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.node_pool_mutex);
for (struct evict_node *en = head_evicted; en; en = en->next_evicted) {
struct cache_node *n = en->cache_node;
n->next_free = G.cache.node_pool_first_free;
G.cache.node_pool_first_free = n;
}
sys_mutex_unlock(&pool_lock);
}
sys_mutex_unlock(&G.cache.node_pool_mutex);
}
}
atomic_u32_inc_eval(&G.evictor_cycle);
scratch_end(scratch);
/* Wait */
sys_condition_variable_wait_time(&G.evictor_cv, &G.evictor_mutex, EVICTOR_CYCLE_INTERVAL);
sys_condition_variable_wait_time(&G.evictor_cv, &evictor_lock, EVICTOR_CYCLE_INTERVAL);
}
}
sys_mutex_unlock(&G.evictor_mutex);
sys_mutex_unlock(&evictor_lock);
}

View File

@ -334,48 +334,27 @@ struct sys_mutex {
u64 handle;
#if RTC
u64 owner_tid;
struct atomic_i64 count;
#endif
};
struct sys_lock {
b32 exclusive;
struct sys_mutex *mutex;
};
struct sys_mutex sys_mutex_alloc(void);
void sys_mutex_release(struct sys_mutex *mutex);
void sys_mutex_lock(struct sys_mutex *mutex);
void sys_mutex_unlock(struct sys_mutex *mutex);
struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex);
struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex);
void sys_mutex_unlock(struct sys_lock *lock);
#if RTC
void sys_mutex_assert_locked(struct sys_mutex *mutex);
void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex);
void sys_assert_locked_s(struct sys_lock *lock, struct sys_mutex *mutex);
#else
# define sys_mutex_assert_locked(m)
#endif
/* ========================== *
* RW Mutex
* ========================== */
struct sys_rw_mutex {
u64 handle;
#if RTC
struct atomic_i64 num_shared;
u64 owner_tid;
# if _WIN32
wchar_t *owner_name;
# else
char *owner_name;
# endif
#endif
};
struct sys_rw_mutex sys_rw_mutex_alloc(void);
void sys_rw_mutex_release(struct sys_rw_mutex *mutex);
void sys_rw_mutex_lock_exclusive(struct sys_rw_mutex *mutex);
void sys_rw_mutex_unlock_exclusive(struct sys_rw_mutex *mutex);
void sys_rw_mutex_lock_shared(struct sys_rw_mutex *mutex);
void sys_rw_mutex_unlock_shared(struct sys_rw_mutex *mutex);
#if RTC
void sys_rw_mutex_assert_locked_exclusive(struct sys_rw_mutex *mutex);
#else
# define sys_rw_mutex_assert_locked_exclusive(m)
# define sys_assert_locked_e(l, m)
# define sys_assert_locked_s(l, m)
#endif
/* ========================== *
@ -391,8 +370,8 @@ struct sys_condition_variable {
struct sys_condition_variable sys_condition_variable_alloc(void);
void sys_condition_variable_release(struct sys_condition_variable *cv);
void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_mutex *mutex);
void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct sys_mutex *mutex, f64 seconds);
void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_lock *lock);
void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct sys_lock *lock, f64 seconds);
void sys_condition_variable_signal(struct sys_condition_variable *cv, u32 count);
void sys_condition_variable_broadcast(struct sys_condition_variable *cv);

View File

@ -68,7 +68,7 @@ struct win32_window {
u16 utf16_high_surrogate_last_input;
struct sys_rw_mutex settings_rw_mutex;
struct sys_mutex settings_mutex;
struct sys_window_settings settings;
i32 monitor_width;
@ -119,7 +119,7 @@ GLOBAL struct {
struct win32_condition_variable *first_free_condition_variable;
/* Thread params */
struct sys_rw_mutex threads_rw_mutex;
struct sys_mutex threads_mutex;
struct arena threads_arena;
struct win32_thread *threads_first;
struct win32_thread *threads_last;
@ -674,13 +674,11 @@ INTERNAL void win32_update_window_from_system(struct win32_window *window);
INTERNAL void win32_window_process_event(struct win32_window *window, struct sys_event event)
{
sys_mutex_lock(&window->event_callbacks_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&window->event_callbacks_mutex);
for (u64 i = 0; i < window->event_callbacks_count; ++i) {
window->event_callbacks[i](event);
}
}
sys_mutex_unlock(&window->event_callbacks_mutex);
sys_mutex_unlock(&lock);
}
INTERNAL HWND win32_create_window(struct win32_window *window)
@ -802,24 +800,23 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(window_thread_entry_point, arg)
INTERNAL struct win32_window *win32_window_alloc(void)
{
struct win32_window *window = NULL;
sys_mutex_lock(&G.windows_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.windows_mutex);
if (G.first_free_window) {
window = G.first_free_window;
G.first_free_window = window->next_free;
} else {
window = arena_push(&G.windows_arena, struct win32_window);
}
sys_mutex_unlock(&lock);
}
sys_mutex_unlock(&G.windows_mutex);
MEMZERO_STRUCT(window);
/* Allocate sync flag */
window->ready_sf = sync_flag_alloc();
/* Allocate mutexes */
window->settings_rw_mutex = sys_rw_mutex_alloc();
window->settings_mutex = sys_mutex_alloc();
window->event_callbacks_mutex = sys_mutex_alloc();
/* Start window thread for processing events */
@ -833,8 +830,8 @@ INTERNAL struct win32_window *win32_window_alloc(void)
INTERNAL void win32_window_release(struct win32_window *window)
{
sys_mutex_lock(&G.windows_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.windows_mutex);
window->next_free = G.first_free_window;
G.first_free_window = window;
@ -844,12 +841,12 @@ INTERNAL void win32_window_release(struct win32_window *window)
/* Release mutexes */
sys_mutex_release(&window->event_callbacks_mutex);
sys_rw_mutex_release(&window->settings_rw_mutex);
sys_mutex_release(&window->settings_mutex);
/* Release sync flag */
sync_flag_release(&window->ready_sf);
}
sys_mutex_unlock(&G.windows_mutex);
sys_mutex_unlock(&lock);
}
INTERNAL void win32_update_window_from_system(struct win32_window *window)
@ -1211,8 +1208,7 @@ void sys_window_release(struct sys_window *sys_window)
void sys_window_register_event_callback(struct sys_window *sys_window, sys_window_event_callback_func *func)
{
struct win32_window *window = (struct win32_window *)sys_window->handle;
sys_mutex_lock(&window->event_callbacks_mutex);
struct sys_lock lock = sys_mutex_lock_e(&window->event_callbacks_mutex);
{
if (window->event_callbacks_count + 1 > ARRAY_COUNT(window->event_callbacks)) {
sys_panic(STR("Too many window event callbacks registered"));
@ -1220,14 +1216,14 @@ void sys_window_register_event_callback(struct sys_window *sys_window, sys_windo
window->event_callbacks[window->event_callbacks_count++] = func;
}
}
sys_mutex_unlock(&window->event_callbacks_mutex);
sys_mutex_unlock(&lock);
}
void sys_window_unregister_event_callback(struct sys_window *sys_window, sys_window_event_callback_func *func)
{
struct win32_window *window = (struct win32_window *)sys_window->handle;
sys_mutex_lock(&window->event_callbacks_mutex);
struct sys_lock lock = sys_mutex_lock_e(&window->event_callbacks_mutex);
{
u64 count = window->event_callbacks_count;
sys_window_event_callback_func *last = count > 0 ? window->event_callbacks[count - 1] : NULL;
@ -1242,18 +1238,18 @@ void sys_window_unregister_event_callback(struct sys_window *sys_window, sys_win
}
}
}
sys_mutex_unlock(&window->event_callbacks_mutex);
sys_mutex_unlock(&lock);
}
void sys_window_update_settings(struct sys_window *sys_window, struct sys_window_settings *settings)
{
__prof;
struct win32_window *window = (struct win32_window *)sys_window->handle;
sys_rw_mutex_lock_exclusive(&window->settings_rw_mutex);
struct sys_lock lock = sys_mutex_lock_e(&window->settings_mutex);
{
win32_update_window_from_settings(window, settings);
}
sys_rw_mutex_unlock_exclusive(&window->settings_rw_mutex);
sys_mutex_unlock(&lock);
}
/* FIXME: Lock settings mutex for these functions */
@ -1268,7 +1264,7 @@ void sys_window_show(struct sys_window *sys_window)
{
struct win32_window *window = (struct win32_window *)sys_window->handle;
HWND hwnd = window->hwnd;
sys_rw_mutex_lock_exclusive(&window->settings_rw_mutex);
struct sys_lock lock = sys_mutex_lock_e(&window->settings_mutex);
{
i32 show_cmd = SW_NORMAL;
struct sys_window_settings *settings = &window->settings;
@ -1281,7 +1277,7 @@ void sys_window_show(struct sys_window *sys_window)
ShowWindow(hwnd, show_cmd);
BringWindowToTop(hwnd);
}
sys_rw_mutex_unlock_exclusive(&window->settings_rw_mutex);
sys_mutex_unlock(&lock);
}
struct v2 sys_window_get_size(struct sys_window *sys_window)
@ -1357,101 +1353,61 @@ void sys_mutex_release(struct sys_mutex *mutex)
{
__prof;
(UNUSED)mutex;
/* Mutex must be unlocked */
ASSERT(mutex->owner_tid == 0);
/* Mutex should be unlocked */
ASSERT(atomic_i64_eval(&mutex->count) == 0);
}
void sys_mutex_lock(struct sys_mutex *mutex)
struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex)
{
__prof;
AcquireSRWLockExclusive((SRWLOCK *)&mutex->handle);
#if RTC
mutex->owner_tid = (u64)GetCurrentThreadId();
atomic_i64_inc_eval(&mutex->count);
#endif
struct sys_lock lock = { 0 };
lock.exclusive = true;
lock.mutex = mutex;
return lock;
}
void sys_mutex_unlock(struct sys_mutex *mutex)
{
__prof;
#if RTC
mutex->owner_tid = 0;
#endif
ReleaseSRWLockExclusive((SRWLOCK *)&mutex->handle);
}
#if RTC
void sys_mutex_assert_locked(struct sys_mutex *mutex)
{
ASSERT(mutex->owner_tid == (u64)GetCurrentThreadId());
}
#endif
/* ========================== *
* RW Mutex
* ========================== */
struct sys_rw_mutex sys_rw_mutex_alloc(void)
{
__prof;
SRWLOCK srwlock;
InitializeSRWLock(&srwlock);
struct sys_rw_mutex mutex = {
.handle = *(u64 *)&srwlock
};
return mutex;
}
void sys_rw_mutex_release(struct sys_rw_mutex *mutex)
{
__prof;
(UNUSED)mutex;
/* Mutex must be unlocked */
ASSERT(mutex->owner_tid == 0);
ASSERT(atomic_i64_eval(&mutex->num_shared) == 0);
}
void sys_rw_mutex_lock_exclusive(struct sys_rw_mutex *mutex)
{
__prof;
AcquireSRWLockExclusive((SRWLOCK *)&mutex->handle);
#if RTC
mutex->owner_tid = (u64)GetCurrentThreadId();
GetThreadDescription(GetCurrentThread(), &mutex->owner_name);
#endif
}
void sys_rw_mutex_unlock_exclusive(struct sys_rw_mutex *mutex)
{
__prof;
#if RTC
mutex->owner_name = L"None";
mutex->owner_tid = 0;
#endif
ReleaseSRWLockExclusive((SRWLOCK *)&mutex->handle);
}
void sys_rw_mutex_lock_shared(struct sys_rw_mutex *mutex)
struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex)
{
__prof;
AcquireSRWLockShared((SRWLOCK *)&mutex->handle);
#if RTC
atomic_i64_inc_eval(&mutex->num_shared);
atomic_i64_inc_eval(&mutex->count);
#endif
struct sys_lock lock = { 0 };
lock.mutex = mutex;
return lock;
}
void sys_rw_mutex_unlock_shared(struct sys_rw_mutex *mutex)
void sys_mutex_unlock(struct sys_lock *lock)
{
__prof;
#if RTC
atomic_i64_dec_eval(&mutex->num_shared);
atomic_i64_dec_eval(&lock->mutex->count);
lock->mutex->owner_tid = 0;
#endif
ReleaseSRWLockShared((SRWLOCK *)&mutex->handle);
if (lock->exclusive) {
ReleaseSRWLockExclusive((SRWLOCK *)&lock->mutex->handle);
} else {
ReleaseSRWLockShared((SRWLOCK *)&lock->mutex->handle);
}
MEMZERO_STRUCT(lock);
}
#if RTC
void sys_rw_mutex_assert_locked_exclusive(struct sys_rw_mutex *mutex)
void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex)
{
ASSERT(mutex->owner_tid == (u64)GetCurrentThreadId());
ASSERT(lock->mutex == mutex);
ASSERT(lock->exclusive == true);
}
void sys_assert_locked_s(struct sys_lock *lock, struct sys_mutex *mutex)
{
ASSERT(lock->mutex == mutex);
}
#endif
@ -1464,16 +1420,14 @@ INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void)
__prof;
struct win32_condition_variable *cv = NULL;
{
sys_mutex_lock(&G.condition_variables_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.condition_variables_mutex);
if (G.first_free_condition_variable) {
cv = G.first_free_condition_variable;
G.first_free_condition_variable = cv->next_free;
} else {
cv = arena_push_zero(&G.condition_variables_arena, struct win32_condition_variable);
}
}
sys_mutex_unlock(&G.condition_variables_mutex);
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(cv);
@ -1485,13 +1439,10 @@ INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void)
INTERNAL void win32_condition_variable_release(struct win32_condition_variable *w32cv)
{
__prof;
sys_mutex_lock(&G.condition_variables_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.condition_variables_mutex);
w32cv->next_free = G.first_free_condition_variable;
G.first_free_condition_variable = w32cv;
}
sys_mutex_unlock(&G.condition_variables_mutex);
sys_mutex_unlock(&lock);
}
struct sys_condition_variable sys_condition_variable_alloc(void)
@ -1511,33 +1462,49 @@ void sys_condition_variable_release(struct sys_condition_variable *cv)
win32_condition_variable_release((struct win32_condition_variable *)cv->handle);
}
void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_mutex *mutex)
void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_lock *lock)
{
__prof;
struct sys_mutex *mutex = lock->mutex;
b32 exclusive = lock->exclusive;
#if RTC
atomic_i64_inc_eval(&cv->num_waiters);
if (exclusive) {
mutex->owner_tid = 0;
}
atomic_i64_dec_eval(&mutex->count);
#endif
struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle;
SleepConditionVariableSRW(&w32cv->condition_variable, (SRWLOCK *)&mutex->handle, INFINITE, 0);
SleepConditionVariableSRW(&w32cv->condition_variable, (SRWLOCK *)&mutex->handle, INFINITE, exclusive ? 0 : CONDITION_VARIABLE_LOCKMODE_SHARED);
#if RTC
atomic_i64_inc_eval(&mutex->count);
if (exclusive) {
mutex->owner_tid = (u64)GetCurrentThreadId();
}
atomic_i64_dec_eval(&cv->num_waiters);
#endif
}
void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct sys_mutex *mutex, f64 seconds)
void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct sys_lock *lock, f64 seconds)
{
__prof;
struct sys_mutex *mutex = lock->mutex;
b32 exclusive = lock->exclusive;
#if RTC
atomic_i64_inc_eval(&cv->num_waiters);
if (exclusive) {
mutex->owner_tid = 0;
}
atomic_i64_dec_eval(&mutex->count);
#endif
struct win32_condition_variable *w32cv = (struct win32_condition_variable *)cv->handle;
u32 ms = (u32)math_round_to_int((f32)seconds * 1000.f);
SleepConditionVariableSRW(&w32cv->condition_variable, (SRWLOCK *)&mutex->handle, ms, 0);
SleepConditionVariableSRW(&w32cv->condition_variable, (SRWLOCK *)&mutex->handle, ms, exclusive ? 0 : CONDITION_VARIABLE_LOCKMODE_SHARED);
#if RTC
atomic_i64_inc_eval(&mutex->count);
if (exclusive) {
mutex->owner_tid = (u64)GetCurrentThreadId();
}
atomic_i64_dec_eval(&cv->num_waiters);
#endif
}
@ -1610,8 +1577,10 @@ struct thread_local_store *sys_thread_get_thread_local_store(void)
* Threads
* ========================== */
INTERNAL struct win32_thread *win32_thread_alloc_assume_locked(void)
INTERNAL struct win32_thread *win32_thread_alloc_assume_locked(struct sys_lock *lock)
{
sys_assert_locked_e(lock, &G.threads_mutex);
struct win32_thread *t = NULL;
if (G.threads_first_free) {
t = G.threads_first_free;
@ -1631,8 +1600,10 @@ INTERNAL struct win32_thread *win32_thread_alloc_assume_locked(void)
return t;
}
INTERNAL void win32_thread_release_assume_locked(struct win32_thread *t)
INTERNAL void win32_thread_release_assume_locked(struct sys_lock *lock, struct win32_thread *t)
{
sys_assert_locked_e(lock, &G.threads_mutex);
if (t->prev) {
t->prev->next = t->next;
}
@ -1652,8 +1623,10 @@ INTERNAL void win32_thread_release_assume_locked(struct win32_thread *t)
};
}
INTERNAL struct win32_thread *win32_thread_from_sys_thread_assume_locked(struct sys_thread st)
INTERNAL struct win32_thread *win32_thread_from_sys_thread_assume_locked(struct sys_lock *lock, struct sys_thread st)
{
sys_assert_locked_s(lock, &G.threads_mutex);
u64 gen = st.handle[0];
struct win32_thread *t = (struct win32_thread *)st.handle[1];
if (t->gen == gen) {
@ -1663,8 +1636,10 @@ INTERNAL struct win32_thread *win32_thread_from_sys_thread_assume_locked(struct
}
}
INTERNAL struct sys_thread sys_thread_from_win32_thread_assume_locked(struct win32_thread *t)
INTERNAL struct sys_thread sys_thread_from_win32_thread_assume_locked(struct sys_lock *lock, struct win32_thread *t)
{
sys_assert_locked_s(lock, &G.threads_mutex);
return (struct sys_thread) {
.handle[0] = t->gen,
.handle[1] = (u64)t
@ -1697,11 +1672,11 @@ INTERNAL DWORD WINAPI win32_thread_proc(LPVOID vt)
t->entry_point(t->thread_data);
/* Release thread object */
sys_rw_mutex_lock_exclusive(&G.threads_rw_mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.threads_mutex);
{
win32_thread_release_assume_locked(t);
win32_thread_release_assume_locked(&lock, t);
}
sys_rw_mutex_unlock_exclusive(&G.threads_rw_mutex);
sys_mutex_unlock(&lock);
/* Release TLS */
win32_tls_release(&tls);
@ -1718,10 +1693,10 @@ struct sys_thread sys_thread_alloc(sys_thread_entry_point_func *entry_point, voi
logf_info("Creating thread \"%F\"", FMT_STR(thread_name));
struct sys_thread res = { 0 };
sys_rw_mutex_lock_exclusive(&G.threads_rw_mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.threads_mutex);
{
/* Allocate thread object */
struct win32_thread *t = win32_thread_alloc_assume_locked();
struct win32_thread *t = win32_thread_alloc_assume_locked(&lock);
t->entry_point = entry_point;
t->thread_data = thread_data;
@ -1741,9 +1716,10 @@ struct sys_thread sys_thread_alloc(sys_thread_entry_point_func *entry_point, voi
sys_panic(STR("Failed to create thread"));
}
res = sys_thread_from_win32_thread_assume_locked(t);
res = sys_thread_from_win32_thread_assume_locked(&lock, t);
}
sys_rw_mutex_unlock_exclusive(&G.threads_rw_mutex);
sys_mutex_unlock(&lock);
return res;
}
@ -1752,14 +1728,14 @@ void sys_thread_wait_release(struct sys_thread *thread)
HANDLE handle = 0;
/* Lookup */
sys_rw_mutex_lock_shared(&G.threads_rw_mutex);
struct sys_lock lock = sys_mutex_lock_s(&G.threads_mutex);
{
struct win32_thread *t = win32_thread_from_sys_thread_assume_locked(*thread);
struct win32_thread *t = win32_thread_from_sys_thread_assume_locked(&lock, *thread);
if (t) {
handle = t->handle;
}
}
sys_rw_mutex_unlock_shared(&G.threads_rw_mutex);
sys_mutex_unlock(&lock);
/* Wait */
if (handle) {
@ -2082,7 +2058,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
G.condition_variables_arena = arena_alloc(GIGABYTE(64));
/* Set up threads */
G.threads_rw_mutex = sys_rw_mutex_alloc();
G.threads_mutex = sys_mutex_alloc();
G.threads_arena = arena_alloc(GIGABYTE(64));
/* Set up windows */
@ -2154,12 +2130,12 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Get app thread handle */
HANDLE app_thread_handle = 0;
sys_rw_mutex_lock_shared(&G.threads_rw_mutex);
struct sys_lock lock = sys_mutex_lock_s(&G.threads_mutex);
{
struct win32_thread *wt = win32_thread_from_sys_thread_assume_locked(app_thread);
struct win32_thread *wt = win32_thread_from_sys_thread_assume_locked(&lock, app_thread);
app_thread_handle = wt->handle;
}
sys_rw_mutex_unlock_shared(&G.threads_rw_mutex);
sys_mutex_unlock(&lock);
/* Wait for either app thread exit or panic */
if (app_thread_handle) {

View File

@ -151,7 +151,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(user_shutdown)
INTERNAL struct sys_event_array pop_sys_events(struct arena *arena)
{
struct sys_event_array array = { 0 };
sys_mutex_lock(&G.sys_events_mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.sys_events_mutex);
{
struct buffer events_buff = arena_to_buffer(&G.sys_events_arena);
arena_align(arena, alignof(struct sys_event));
@ -160,17 +160,17 @@ INTERNAL struct sys_event_array pop_sys_events(struct arena *arena)
MEMCPY(array.events, events_buff.data, events_buff.size);
arena_reset(&G.sys_events_arena);
}
sys_mutex_unlock(&G.sys_events_mutex);
sys_mutex_unlock(&lock);
return array;
}
INTERNAL SYS_WINDOW_EVENT_CALLBACK_FUNC_DEF(window_event_callback, event)
{
sys_mutex_lock(&G.sys_events_mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.sys_events_mutex);
{
*arena_push(&G.sys_events_arena, struct sys_event) = event;
}
sys_mutex_unlock(&G.sys_events_mutex);
sys_mutex_unlock(&lock);
}
/* ========================== *

View File

@ -126,7 +126,6 @@ INLINE void *fixed_dict_get(const struct fixed_dict *dict, struct string key)
* ========================== */
struct sync_flag {
/* TODO: Make this a rw mutex? */
struct sys_mutex mutex;
struct sys_condition_variable cv;
b32 flag;
@ -149,24 +148,20 @@ INLINE void sync_flag_release(struct sync_flag *sf)
INLINE void sync_flag_set(struct sync_flag *sf)
{
__prof;
sys_mutex_lock(&sf->mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&sf->mutex);
sf->flag = 1;
sys_condition_variable_broadcast(&sf->cv);
}
sys_mutex_unlock(&sf->mutex);
sys_mutex_unlock(&lock);
}
INLINE void sync_flag_wait(struct sync_flag *sf)
{
__prof;
sys_mutex_lock(&sf->mutex);
{
while (sf->flag == 0) {
sys_condition_variable_wait(&sf->cv, &sf->mutex);
struct sys_lock lock = sys_mutex_lock_s(&sf->mutex);
while (sf->flag != 1) {
sys_condition_variable_wait(&sf->cv, &lock);
}
}
sys_mutex_unlock(&sf->mutex);
sys_mutex_unlock(&lock);
}
/* ========================== *
@ -185,32 +180,4 @@ INLINE void sleep_frame(sys_timestamp_t last_frame_time, f64 target_dt)
}
}
#if 0
/* ========================== *
* Sync buffer
* ========================== */
struct sync_buff {
struct arena arena;
struct sys_mutex mutex;
};
struct sync_buff sync_buff_alloc(u64 arena_reserve)
{
}
void sync_buff_release(struct sync_buff *sb)
{
}
INLINE void sync_buff_read(void)
{
}
INLINE void sync_buff_write(void)
{
}
#endif
#endif

View File

@ -24,11 +24,6 @@
* also do work themselves (IE: callers of "work_wait")
*/
/* NOTE:
* Functions suffixed with "assume_locked" require `G.mutex` to be
* locked & unlocked by the caller.
*/
struct worker {
struct sys_thread thread;
struct worker *next;
@ -122,7 +117,7 @@ struct work_startup_receipt work_startup(u32 num_worker_threads)
app_register_exit_callback(&work_shutdown);
/* Initialize threads */
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
struct worker *prev = NULL;
for (u32 i = 0; i < num_worker_threads; ++i) {
@ -140,7 +135,7 @@ struct work_startup_receipt work_startup(u32 num_worker_threads)
prev = worker;
}
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
scratch_end(scratch);
@ -150,12 +145,14 @@ struct work_startup_receipt work_startup(u32 num_worker_threads)
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(work_shutdown)
{
__prof;
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
G.workers_shutdown = true;
sys_condition_variable_broadcast(&G.cv);
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
for (struct worker *worker = G.worker_head; worker; worker = worker->next) {
sys_thread_wait_release(&worker->thread);
}
@ -165,10 +162,11 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(work_shutdown)
* Internal work / task allocation
* ========================== */
INTERNAL struct work *work_alloc_assume_locked(void)
INTERNAL struct work *work_alloc_assume_locked(struct sys_lock *lock)
{
__prof;
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
struct work *work = NULL;
/* Allocate work */
@ -191,26 +189,29 @@ INTERNAL struct work *work_alloc_assume_locked(void)
return work;
}
INTERNAL void work_release_assume_locked(struct work *work)
INTERNAL void work_release_assume_locked(struct sys_lock *lock, struct work *work)
{
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
work->next_free = G.free_work_head;
G.free_work_head = work;
++work->gen;
}
INTERNAL struct work_handle work_to_handle_assume_locked(struct work *work)
INTERNAL struct work_handle work_to_handle_assume_locked(struct sys_lock *lock, struct work *work)
{
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
return (struct work_handle) {
.work = work,
.gen = work->gen
};
}
INTERNAL struct work_task *task_alloc_assume_locked(void)
INTERNAL struct work_task *task_alloc_assume_locked(struct sys_lock *lock)
{
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
struct work_task *task = NULL;
/* Allocate task */
@ -227,9 +228,10 @@ INTERNAL struct work_task *task_alloc_assume_locked(void)
return task;
}
INTERNAL void task_release_assume_locked(struct work_task *task)
INTERNAL void task_release_assume_locked(struct sys_lock *lock, struct work_task *task)
{
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
task->next_free = G.free_task_head;
G.free_task_head = task;
}
@ -238,10 +240,11 @@ INTERNAL void task_release_assume_locked(struct work_task *task)
* Work scheduling / insertion
* ========================== */
INTERNAL void work_schedule_assume_locked(struct work *work)
INTERNAL void work_schedule_assume_locked(struct sys_lock *lock, struct work *work)
{
__prof;
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
enum work_priority priority = work->priority;
if (G.scheduled_work_head) {
@ -275,10 +278,10 @@ INTERNAL void work_schedule_assume_locked(struct work *work)
sys_condition_variable_signal(&G.cv, work->tasks_incomplete);
}
INTERNAL void work_unschedule_assume_locked(struct work *work)
INTERNAL void work_unschedule_assume_locked(struct sys_lock *lock, struct work *work)
{
__prof;
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
struct work *prev = (struct work *)work->prev_scheduled;
struct work *next = (struct work *)work->next_scheduled;
@ -306,16 +309,17 @@ INTERNAL void work_unschedule_assume_locked(struct work *work)
* Task dequeuing
* ========================== */
INTERNAL struct work_task *work_dequeue_task_assume_locked(struct work *work)
INTERNAL struct work_task *work_dequeue_task_assume_locked(struct sys_lock *lock, struct work *work)
{
__prof;
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
struct work_task *task = work->task_head;
if (task) {
work->task_head = task->next_in_work;
if (!work->task_head) {
/* Unschedule work if last task */
work_unschedule_assume_locked(work);
work_unschedule_assume_locked(lock, work);
}
}
return task;
@ -327,12 +331,12 @@ INTERNAL struct work_task *work_dequeue_task_assume_locked(struct work *work)
/* NOTE: This function will release `work` if there are no more tasks once completed.
* Returns `true` if more tasks are still present in the work after completion. */
INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct work *work)
INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct sys_lock *lock, struct work *work)
{
__prof;
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
struct work_task *task = work_dequeue_task_assume_locked(work);
struct work_task *task = work_dequeue_task_assume_locked(lock, work);
b32 more_tasks = work->task_head != NULL;
if (task) {
@ -340,15 +344,14 @@ INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct work *work
++work->workers;
/* Do task (temporarily unlock) */
sys_mutex_unlock(&G.mutex);
{
sys_mutex_unlock(lock);
task->func(task->data);
*lock = sys_mutex_lock_e(&G.mutex);
}
sys_mutex_lock(&G.mutex);
--work->workers;
--work->tasks_incomplete;
task_release_assume_locked(task);
task_release_assume_locked(lock, task);
if (work->tasks_incomplete == 0) {
/* Signal finished */
@ -356,23 +359,22 @@ INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct work *work
sys_condition_variable_broadcast(&work->condition_variable_finished);
/* Release */
work_release_assume_locked(work);
work_release_assume_locked(lock, work);
}
}
return more_tasks;
}
INTERNAL void work_exec_remaining_tasks_maybe_release_assume_locked(struct work *work)
INTERNAL void work_exec_remaining_tasks_maybe_release_assume_locked(struct sys_lock *lock, struct work *work)
{
__prof;
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
b32 more_tasks = true;
while (more_tasks) {
more_tasks = work_exec_single_task_maybe_release_assume_locked(work);
more_tasks = work_exec_single_task_maybe_release_assume_locked(lock, work);
}
}
/* ========================== *
@ -388,21 +390,21 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(worker_thread_entry_point, thread_data)
.is_worker = true
};
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
while (!G.workers_shutdown) {
struct work *work = G.scheduled_work_head;
if (work) {
__profscope(work_pool_task);
--G.idle_worker_count;
work_exec_single_task_maybe_release_assume_locked(work);
work_exec_single_task_maybe_release_assume_locked(&lock, work);
++G.idle_worker_count;
} else {
sys_condition_variable_wait(&G.cv, &G.mutex);
sys_condition_variable_wait(&G.cv, &lock);
}
}
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
}
/* ========================== *
@ -410,13 +412,13 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(worker_thread_entry_point, thread_data)
* ========================== */
/* If `help` is true, then the calling thread will start picking up tasks immediately (before other workers can see it) */
INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate *ws, b32 help, enum work_priority priority)
INTERNAL struct work_handle work_push_from_slate_assume_locked(struct sys_lock *lock, struct work_slate *ws, b32 help, enum work_priority priority)
{
__prof;
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
struct work *work = work_alloc_assume_locked();
struct work_handle wh = work_to_handle_assume_locked(work);
struct work *work = work_alloc_assume_locked(lock);
struct work_handle wh = work_to_handle_assume_locked(lock, work);
work->priority = priority;
work->status = WORK_STATUS_IN_PROGRESS;
@ -424,10 +426,10 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate
work->task_head = ws->task_head;
work->tasks_incomplete = ws->num_tasks;
work_schedule_assume_locked(work);
work_schedule_assume_locked(lock, work);
if (help) {
work_exec_remaining_tasks_maybe_release_assume_locked(work);
work_exec_remaining_tasks_maybe_release_assume_locked(lock, work);
} else {
/* When work is submitted from a worker thread, we want the worker to pick
* up the tasks itself when idle workers = 0 and work.workers = 0
@ -447,7 +449,7 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate
if (ctx->is_worker) {
b32 work_done = false;
while (!work_done && G.idle_worker_count == 0 && work->workers == 0) {
work_done = !work_exec_single_task_maybe_release_assume_locked(work);
work_done = !work_exec_single_task_maybe_release_assume_locked(lock, work);
}
}
}
@ -458,9 +460,9 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate
INTERNAL struct work_handle work_push_task_internal(work_task_func *func, void *data, b32 help, enum work_priority priority)
{
struct work_handle handle;
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
struct work_task *task = task_alloc_assume_locked();
struct work_task *task = task_alloc_assume_locked(&lock);
task->data = data;
task->func = func;
@ -469,10 +471,9 @@ INTERNAL struct work_handle work_push_task_internal(work_task_func *func, void *
.task_tail = task,
.num_tasks = 1
};
handle = work_push_from_slate_assume_locked(&ws, help, priority);
handle = work_push_from_slate_assume_locked(&lock, &ws, help, priority);
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
return handle;
}
@ -503,11 +504,11 @@ void work_slate_push_task(struct work_slate *ws, work_task_func *func, void *dat
__prof;
struct work_task *task = NULL;
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
task = task_alloc_assume_locked();
task = task_alloc_assume_locked(&lock);
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
task->data = data;
task->func = func;
@ -528,11 +529,11 @@ struct work_handle work_slate_end(struct work_slate *ws, enum work_priority prio
__prof;
struct work_handle handle;
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
handle = work_push_from_slate_assume_locked(ws, false, priority);
handle = work_push_from_slate_assume_locked(&lock, ws, false, priority);
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
return handle;
}
@ -541,9 +542,9 @@ struct work_handle work_slate_end_and_help(struct work_slate *ws, enum work_prio
{
__prof;
sys_mutex_lock(&G.mutex);
struct work_handle handle = work_push_from_slate_assume_locked(ws, true, priority);
sys_mutex_unlock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
struct work_handle handle = work_push_from_slate_assume_locked(&lock, ws, true, priority);
sys_mutex_unlock(&lock);
return handle;
}
@ -552,9 +553,9 @@ struct work_handle work_slate_end_and_help(struct work_slate *ws, enum work_prio
* Work intervention interface
* ========================== */
INTERNAL struct work *work_from_handle_assume_locked(struct work_handle handle)
INTERNAL struct work *work_from_handle_assume_locked(struct sys_lock *lock, struct work_handle handle)
{
sys_mutex_assert_locked(&G.mutex);
sys_assert_locked_e(lock, &G.mutex);
struct work *work = handle.work;
if (work->gen != handle.gen) {
@ -568,35 +569,35 @@ INTERNAL struct work *work_from_handle_assume_locked(struct work_handle handle)
void work_wait(struct work_handle handle)
{
__prof;
sys_mutex_lock(&G.mutex);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
struct work *work = work_from_handle_assume_locked(handle);
struct work *work = work_from_handle_assume_locked(&lock, handle);
if (work) {
/* Help with tasks */
work_exec_remaining_tasks_maybe_release_assume_locked(work);
work_exec_remaining_tasks_maybe_release_assume_locked(&lock, work);
/* Wait for work completion */
				work = work_from_handle_assume_locked(handle); /* Re-checking work is still valid here in case work_do caused work to release */
				work = work_from_handle_assume_locked(&lock, handle); /* Re-checking work is still valid here in case work_exec caused work to release */
if (work) {
while (work->status != WORK_STATUS_DONE) {
sys_condition_variable_wait(&work->condition_variable_finished, &G.mutex);
sys_condition_variable_wait(&work->condition_variable_finished, &lock);
}
}
}
}
sys_mutex_unlock(&G.mutex);
sys_mutex_unlock(&lock);
}
/* Try to pick up any scheduled tasks */
void work_help(struct work_handle handle)
{
__prof;
sys_mutex_lock(&G.mutex);
struct work *work = work_from_handle_assume_locked(handle);
struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
{
struct work *work = work_from_handle_assume_locked(&lock, handle);
if (work) {
work_exec_remaining_tasks_maybe_release_assume_locked(work);
work_exec_remaining_tasks_maybe_release_assume_locked(&lock, work);
}
sys_mutex_unlock(&G.mutex);
}
sys_mutex_unlock(&lock);
}