From 5eb515ab90c09bf6543a8389d4c769e675fbba37 Mon Sep 17 00:00:00 2001 From: jacob Date: Tue, 25 Jun 2024 19:58:48 -0500 Subject: [PATCH] rename suffix assume_locked -> ...locked --- src/asset_cache.c | 6 ++--- src/mixer.c | 8 +++--- src/sys_win32.c | 18 ++++++------- src/work.c | 66 +++++++++++++++++++++++------------------------ 4 files changed, 49 insertions(+), 49 deletions(-) diff --git a/src/asset_cache.c b/src/asset_cache.c index be322724..93d8403b 100644 --- a/src/asset_cache.c +++ b/src/asset_cache.c @@ -74,7 +74,7 @@ INTERNAL void refresh_dbg_table(void) /* Returns first matching slot or first empty slot if not found. * Check returned slot->hash != 0 for presence. */ -INTERNAL struct asset *asset_cache_get_slot_assume_locked(struct sys_lock *lock, struct string key, u64 hash) +INTERNAL struct asset *asset_cache_get_slot_locked(struct sys_lock *lock, struct string key, u64 hash) { sys_assert_locked_s(lock, &G.lookup_mutex); @@ -123,7 +123,7 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch /* Lookup */ { struct sys_lock lock = sys_mutex_lock_s(&G.lookup_mutex); - asset = asset_cache_get_slot_assume_locked(&lock, key, hash); + asset = asset_cache_get_slot_locked(&lock, key, hash); sys_mutex_unlock(&lock); } @@ -132,7 +132,7 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch struct sys_lock lock = sys_mutex_lock_e(&G.lookup_mutex); /* Re-check asset presence in case it was inserted since lock */ - asset = asset_cache_get_slot_assume_locked(&lock, key, hash); + asset = asset_cache_get_slot_locked(&lock, key, hash); if (!asset->hash) { if (G.num_assets >= MAX_ASSETS) { diff --git a/src/mixer.c b/src/mixer.c index 0fcdf259..d8effc29 100644 --- a/src/mixer.c +++ b/src/mixer.c @@ -102,7 +102,7 @@ INTERNAL struct track *track_from_handle(struct mixer_track_handle handle) } } -INTERNAL struct track *track_alloc_assume_locked(struct sys_lock *lock, struct sound *sound) +INTERNAL struct track *track_alloc_locked(struct sys_lock *lock, struct sound *sound) { sys_assert_locked_e(lock, &G.mutex); @@ -140,7 +140,7 @@ INTERNAL struct track *track_alloc_assume_locked(struct sys_lock *lock, struct s return track; } -INTERNAL void track_release_assume_locked(struct sys_lock *lock, struct track *track) +INTERNAL void track_release_locked(struct sys_lock *lock, struct track *track) { sys_assert_locked_e(lock, &G.mutex); @@ -189,7 +189,7 @@ struct mixer_track_handle mixer_play_ex(struct sound *sound, struct mixer_desc d { struct sys_lock lock = sys_mutex_lock_e(&G.mutex); { - track = track_alloc_assume_locked(&lock, sound); + track = track_alloc_locked(&lock, sound); track->desc = desc; } sys_mutex_unlock(&lock); @@ -474,7 +474,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count) if (track) { if (mix->track_finished) { /* Release finished tracks */ - track_release_assume_locked(&lock, track); + track_release_locked(&lock, track); } } } diff --git a/src/sys_win32.c b/src/sys_win32.c index da752629..3cc8ab11 100644 --- a/src/sys_win32.c +++ b/src/sys_win32.c @@ -1577,7 +1577,7 @@ struct thread_local_store *sys_thread_get_thread_local_store(void) * Threads * ========================== */ -INTERNAL struct win32_thread *win32_thread_alloc_assume_locked(struct sys_lock *lock) +INTERNAL struct win32_thread *win32_thread_alloc_locked(struct sys_lock *lock) { sys_assert_locked_e(lock, &G.threads_mutex); @@ -1600,7 +1600,7 @@ INTERNAL struct win32_thread *win32_thread_alloc_assume_locked(struct sys_lock * return t; } 
-INTERNAL void win32_thread_release_assume_locked(struct sys_lock *lock, struct win32_thread *t) +INTERNAL void win32_thread_release_locked(struct sys_lock *lock, struct win32_thread *t) { sys_assert_locked_e(lock, &G.threads_mutex); @@ -1623,7 +1623,7 @@ INTERNAL void win32_thread_release_assume_locked(struct sys_lock *lock, struct w }; } -INTERNAL struct win32_thread *win32_thread_from_sys_thread_assume_locked(struct sys_lock *lock, struct sys_thread st) +INTERNAL struct win32_thread *win32_thread_from_sys_thread_locked(struct sys_lock *lock, struct sys_thread st) { sys_assert_locked_s(lock, &G.threads_mutex); @@ -1636,7 +1636,7 @@ INTERNAL struct win32_thread *win32_thread_from_sys_thread_assume_locked(struct } } -INTERNAL struct sys_thread sys_thread_from_win32_thread_assume_locked(struct sys_lock *lock, struct win32_thread *t) +INTERNAL struct sys_thread sys_thread_from_win32_thread_locked(struct sys_lock *lock, struct win32_thread *t) { sys_assert_locked_s(lock, &G.threads_mutex); @@ -1674,7 +1674,7 @@ INTERNAL DWORD WINAPI win32_thread_proc(LPVOID vt) /* Release thread object */ struct sys_lock lock = sys_mutex_lock_e(&G.threads_mutex); { - win32_thread_release_assume_locked(&lock, t); + win32_thread_release_locked(&lock, t); } sys_mutex_unlock(&lock); @@ -1696,7 +1696,7 @@ struct sys_thread sys_thread_alloc(sys_thread_entry_point_func *entry_point, voi struct sys_lock lock = sys_mutex_lock_e(&G.threads_mutex); { /* Allocate thread object */ - struct win32_thread *t = win32_thread_alloc_assume_locked(&lock); + struct win32_thread *t = win32_thread_alloc_locked(&lock); t->entry_point = entry_point; t->thread_data = thread_data; @@ -1716,7 +1716,7 @@ struct sys_thread sys_thread_alloc(sys_thread_entry_point_func *entry_point, voi sys_panic(STR("Failed to create thread")); } - res = sys_thread_from_win32_thread_assume_locked(&lock, t); + res = sys_thread_from_win32_thread_locked(&lock, t); } sys_mutex_unlock(&lock); @@ -1730,7 +1730,7 @@ void sys_thread_wait_release(struct sys_thread *thread) /* Lookup */ struct sys_lock lock = sys_mutex_lock_s(&G.threads_mutex); { - struct win32_thread *t = win32_thread_from_sys_thread_assume_locked(&lock, *thread); + struct win32_thread *t = win32_thread_from_sys_thread_locked(&lock, *thread); if (t) { handle = t->handle; } @@ -2132,7 +2132,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, HANDLE app_thread_handle = 0; struct sys_lock lock = sys_mutex_lock_s(&G.threads_mutex); { - struct win32_thread *wt = win32_thread_from_sys_thread_assume_locked(&lock, app_thread); + struct win32_thread *wt = win32_thread_from_sys_thread_locked(&lock, app_thread); app_thread_handle = wt->handle; } sys_mutex_unlock(&lock); diff --git a/src/work.c b/src/work.c index 715ad82c..5d3c83a2 100644 --- a/src/work.c +++ b/src/work.c @@ -162,7 +162,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(work_shutdown) * Internal work / task allocation * ========================== */ -INTERNAL struct work *work_alloc_assume_locked(struct sys_lock *lock) +INTERNAL struct work *work_alloc_locked(struct sys_lock *lock) { __prof; sys_assert_locked_e(lock, &G.mutex); @@ -189,7 +189,7 @@ INTERNAL struct work *work_alloc_assume_locked(struct sys_lock *lock) return work; } -INTERNAL void work_release_assume_locked(struct sys_lock *lock, struct work *work) +INTERNAL void work_release_locked(struct sys_lock *lock, struct work *work) { sys_assert_locked_e(lock, &G.mutex); @@ -198,7 +198,7 @@ INTERNAL void work_release_assume_locked(struct sys_lock *lock, struct work 
*wor ++work->gen; } -INTERNAL struct work_handle work_to_handle_assume_locked(struct sys_lock *lock, struct work *work) +INTERNAL struct work_handle work_to_handle_locked(struct sys_lock *lock, struct work *work) { sys_assert_locked_e(lock, &G.mutex); @@ -208,7 +208,7 @@ INTERNAL struct work_handle work_to_handle_assume_locked(struct sys_lock *lock, }; } -INTERNAL struct work_task *task_alloc_assume_locked(struct sys_lock *lock) +INTERNAL struct work_task *task_alloc_locked(struct sys_lock *lock) { sys_assert_locked_e(lock, &G.mutex); @@ -228,7 +228,7 @@ INTERNAL struct work_task *task_alloc_assume_locked(struct sys_lock *lock) return task; } -INTERNAL void task_release_assume_locked(struct sys_lock *lock, struct work_task *task) +INTERNAL void task_release_locked(struct sys_lock *lock, struct work_task *task) { sys_assert_locked_e(lock, &G.mutex); @@ -240,7 +240,7 @@ INTERNAL void task_release_assume_locked(struct sys_lock *lock, struct work_task * Work scheduling / insertion * ========================== */ -INTERNAL void work_schedule_assume_locked(struct sys_lock *lock, struct work *work) +INTERNAL void work_schedule_locked(struct sys_lock *lock, struct work *work) { __prof; sys_assert_locked_e(lock, &G.mutex); @@ -278,7 +278,7 @@ INTERNAL void work_schedule_assume_locked(struct sys_lock *lock, struct work *wo sys_condition_variable_signal(&G.cv, work->tasks_incomplete); } -INTERNAL void work_unschedule_assume_locked(struct sys_lock *lock, struct work *work) +INTERNAL void work_unschedule_locked(struct sys_lock *lock, struct work *work) { __prof; sys_assert_locked_e(lock, &G.mutex); @@ -309,7 +309,7 @@ INTERNAL void work_unschedule_assume_locked(struct sys_lock *lock, struct work * * Task dequeuing * ========================== */ -INTERNAL struct work_task *work_dequeue_task_assume_locked(struct sys_lock *lock, struct work *work) +INTERNAL struct work_task *work_dequeue_task_locked(struct sys_lock *lock, struct work *work) { __prof; sys_assert_locked_e(lock, &G.mutex); @@ -319,7 +319,7 @@ INTERNAL struct work_task *work_dequeue_task_assume_locked(struct sys_lock *lock work->task_head = task->next_in_work; if (!work->task_head) { /* Unschedule work if last task */ - work_unschedule_assume_locked(lock, work); + work_unschedule_locked(lock, work); } } return task; @@ -331,12 +331,12 @@ INTERNAL struct work_task *work_dequeue_task_assume_locked(struct sys_lock *lock /* NOTE: This function will release `work` if there are no more tasks once completed. * Returns `true` if more tasks are still present in the work after completion. 
*/ -INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct sys_lock *lock, struct work *work) +INTERNAL b32 work_exec_single_task_maybe_release_locked(struct sys_lock *lock, struct work *work) { __prof; sys_assert_locked_e(lock, &G.mutex); - struct work_task *task = work_dequeue_task_assume_locked(lock, work); + struct work_task *task = work_dequeue_task_locked(lock, work); b32 more_tasks = work->task_head != NULL; if (task) { @@ -351,7 +351,7 @@ INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct sys_lock * } --work->workers; --work->tasks_incomplete; - task_release_assume_locked(lock, task); + task_release_locked(lock, task); if (work->tasks_incomplete == 0) { /* Signal finished */ @@ -359,21 +359,21 @@ INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct sys_lock * sys_condition_variable_broadcast(&work->condition_variable_finished); /* Release */ - work_release_assume_locked(lock, work); + work_release_locked(lock, work); } } return more_tasks; } -INTERNAL void work_exec_remaining_tasks_maybe_release_assume_locked(struct sys_lock *lock, struct work *work) +INTERNAL void work_exec_remaining_tasks_maybe_release_locked(struct sys_lock *lock, struct work *work) { __prof; sys_assert_locked_e(lock, &G.mutex); b32 more_tasks = true; while (more_tasks) { - more_tasks = work_exec_single_task_maybe_release_assume_locked(lock, work); + more_tasks = work_exec_single_task_maybe_release_locked(lock, work); } } @@ -397,7 +397,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(worker_thread_entry_point, thread_data) if (work) { __profscope(work_pool_task); --G.idle_worker_count; - work_exec_single_task_maybe_release_assume_locked(&lock, work); + work_exec_single_task_maybe_release_locked(&lock, work); ++G.idle_worker_count; } else { sys_condition_variable_wait(&G.cv, &lock); @@ -412,13 +412,13 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(worker_thread_entry_point, thread_data) * ========================== */ /* If `help` is true, then the calling thread will start picking up tasks immediately (before other workers can see it) */ -INTERNAL struct work_handle work_push_from_slate_assume_locked(struct sys_lock *lock, struct work_slate *ws, b32 help, enum work_priority priority) +INTERNAL struct work_handle work_push_from_slate_locked(struct sys_lock *lock, struct work_slate *ws, b32 help, enum work_priority priority) { __prof; sys_assert_locked_e(lock, &G.mutex); - struct work *work = work_alloc_assume_locked(lock); - struct work_handle wh = work_to_handle_assume_locked(lock, work); + struct work *work = work_alloc_locked(lock); + struct work_handle wh = work_to_handle_locked(lock, work); work->priority = priority; work->status = WORK_STATUS_IN_PROGRESS; @@ -426,10 +426,10 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct sys_lock * work->task_head = ws->task_head; work->tasks_incomplete = ws->num_tasks; - work_schedule_assume_locked(lock, work); + work_schedule_locked(lock, work); if (help) { - work_exec_remaining_tasks_maybe_release_assume_locked(lock, work); + work_exec_remaining_tasks_maybe_release_locked(lock, work); } else { /* When work is submitted from a worker thread, we want the worker to pick * up the tasks itself when idle workers = 0 and work.workers = 0 @@ -449,7 +449,7 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct sys_lock * if (ctx->is_worker) { b32 work_done = false; while (!work_done && G.idle_worker_count == 0 && work->workers == 0) { - work_done = 
!work_exec_single_task_maybe_release_assume_locked(lock, work); + work_done = !work_exec_single_task_maybe_release_locked(lock, work); } } } @@ -462,7 +462,7 @@ INTERNAL struct work_handle work_push_task_internal(work_task_func *func, void * struct work_handle handle; struct sys_lock lock = sys_mutex_lock_e(&G.mutex); { - struct work_task *task = task_alloc_assume_locked(&lock); + struct work_task *task = task_alloc_locked(&lock); task->data = data; task->func = func; @@ -471,7 +471,7 @@ INTERNAL struct work_handle work_push_task_internal(work_task_func *func, void * .task_tail = task, .num_tasks = 1 }; - handle = work_push_from_slate_assume_locked(&lock, &ws, help, priority); + handle = work_push_from_slate_locked(&lock, &ws, help, priority); } sys_mutex_unlock(&lock); return handle; @@ -506,7 +506,7 @@ void work_slate_push_task(struct work_slate *ws, work_task_func *func, void *dat struct work_task *task = NULL; struct sys_lock lock = sys_mutex_lock_e(&G.mutex); { - task = task_alloc_assume_locked(&lock); + task = task_alloc_locked(&lock); } sys_mutex_unlock(&lock); @@ -531,7 +531,7 @@ struct work_handle work_slate_end(struct work_slate *ws, enum work_priority prio struct work_handle handle; struct sys_lock lock = sys_mutex_lock_e(&G.mutex); { - handle = work_push_from_slate_assume_locked(&lock, ws, false, priority); + handle = work_push_from_slate_locked(&lock, ws, false, priority); } sys_mutex_unlock(&lock); @@ -543,7 +543,7 @@ struct work_handle work_slate_end_and_help(struct work_slate *ws, enum work_prio __prof; struct sys_lock lock = sys_mutex_lock_e(&G.mutex); - struct work_handle handle = work_push_from_slate_assume_locked(&lock, ws, true, priority); + struct work_handle handle = work_push_from_slate_locked(&lock, ws, true, priority); sys_mutex_unlock(&lock); return handle; @@ -553,7 +553,7 @@ struct work_handle work_slate_end_and_help(struct work_slate *ws, enum work_prio * Work intervention interface * ========================== */ -INTERNAL struct work *work_from_handle_assume_locked(struct sys_lock *lock, struct work_handle handle) +INTERNAL struct work *work_from_handle_locked(struct sys_lock *lock, struct work_handle handle) { sys_assert_locked_e(lock, &G.mutex); @@ -571,13 +571,13 @@ void work_wait(struct work_handle handle) __prof; struct sys_lock lock = sys_mutex_lock_e(&G.mutex); { - struct work *work = work_from_handle_assume_locked(&lock, handle); + struct work *work = work_from_handle_locked(&lock, handle); if (work) { /* Help with tasks */ - work_exec_remaining_tasks_maybe_release_assume_locked(&lock, work); + work_exec_remaining_tasks_maybe_release_locked(&lock, work); /* Wait for work completion */ - work = work_from_handle_assume_locked(&lock, handle); /* Re-checking work is sitll valid here in case work_exec caused work to release */ + work = work_from_handle_locked(&lock, handle); /* Re-checking work is sitll valid here in case work_exec caused work to release */ if (work) { while (work->status != WORK_STATUS_DONE) { sys_condition_variable_wait(&work->condition_variable_finished, &lock); @@ -594,9 +594,9 @@ void work_help(struct work_handle handle) __prof; struct sys_lock lock = sys_mutex_lock_e(&G.mutex); { - struct work *work = work_from_handle_assume_locked(&lock, handle); + struct work *work = work_from_handle_locked(&lock, handle); if (work) { - work_exec_remaining_tasks_maybe_release_assume_locked(&lock, work); + work_exec_remaining_tasks_maybe_release_locked(&lock, work); } } sys_mutex_unlock(&lock);
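
For reviewers, the convention the new suffix encodes (unchanged by this patch, only renamed): a `_locked` helper never acquires the mutex itself; it takes the caller's `struct sys_lock` and asserts the expected mutex is already held via `sys_assert_locked_e` (exclusive) or `sys_assert_locked_s` (shared), while the caller owns the lock/unlock pair. Below is a minimal sketch of that caller/helper split, not code from this patch: the helper name `work_count_incomplete_locked` and its body are hypothetical, while `INTERNAL`, `struct sys_lock`, `G.mutex`, `sys_mutex_lock_e`, `sys_assert_locked_e`, and `sys_mutex_unlock` are taken from the diff above.

/* Sketch only: illustrates the *_locked calling convention after the rename.
 * The helper name and body are hypothetical; the lock/assert calls are the
 * ones used throughout the patch. */

/* A `_locked` helper does not take G.mutex itself; it asserts the caller did. */
INTERNAL u64 work_count_incomplete_locked(struct sys_lock *lock, struct work *work)
{
	sys_assert_locked_e(lock, &G.mutex); /* caller must hold G.mutex exclusively */
	return work->tasks_incomplete;
}

/* The public entry point owns the lock/unlock pair around the *_locked call. */
INTERNAL u64 work_count_incomplete(struct work *work)
{
	u64 n;
	struct sys_lock lock = sys_mutex_lock_e(&G.mutex);
	{
		n = work_count_incomplete_locked(&lock, work);
	}
	sys_mutex_unlock(&lock);
	return n;
}

This mirrors how work_push_task_internal wraps work_push_from_slate_locked in the patch: the outer function locks and unlocks, and the `_locked` helper only asserts.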