move synchronization primitives out of sys layer

This commit is contained in:
jacob 2025-07-06 16:20:07 -05:00
parent 8c080477a6
commit 69a8a7aa9a
20 changed files with 334 additions and 527 deletions

View File

@ -982,6 +982,12 @@ void OnBuild(StringList cli_args)
SH_Print(Lit("No work to do\n"));
}
if (!D_Exists(executable_file)) {
/* Create blank executable if build fails (since Visual Studio can get
 * confused if no build target exists) */
D_ClearWrite(executable_file, Lit(""));
}
#if 0
if (!success) {
Error(Lit("Build failed\n"));

View File

@ -31,10 +31,10 @@ struct exit_callback {
GLOBAL struct {
struct arena *arena;
struct string write_path;
struct sync_flag exit_sf;
struct snc_counter exit_fence;
/* Exit callbacks */
struct sys_mutex *exit_callbacks_mutex;
struct snc_mutex exit_callbacks_mutex;
struct arena *exit_callbacks_arena;
struct exit_callback *exit_callbacks_head;
} G = ZI, DEBUG_ALIAS(G, G_app);
@ -103,12 +103,12 @@ INTERNAL struct sys_window_settings default_window_settings(struct sys_window *w
void app_register_exit_callback(app_exit_callback_func *func)
{
struct sys_lock lock = sys_mutex_lock_e(G.exit_callbacks_mutex);
struct snc_lock lock = snc_lock_e(&G.exit_callbacks_mutex);
struct exit_callback *callback = arena_push(G.exit_callbacks_arena, struct exit_callback);
callback->func = func;
callback->next = G.exit_callbacks_head;
G.exit_callbacks_head = callback;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -226,8 +226,6 @@ void sys_app_entry(struct string args_str)
bitbuff_test();
#endif
G.exit_sf = sync_flag_alloc();
G.exit_callbacks_mutex = sys_mutex_alloc();
G.exit_callbacks_arena = arena_alloc(GIGABYTE(64));
G.arena = arena_alloc(GIGABYTE(64));
@ -317,7 +315,7 @@ void sys_app_entry(struct string args_str)
sys_window_show(window);
/* Wait for app_exit() */
sync_flag_wait(&G.exit_sf);
snc_counter_wait_gtz(&G.exit_fence);
/* Run exit callbacks */
/* FIXME: Only wait on shutdown for a certain period of time before
@ -325,11 +323,11 @@ void sys_app_entry(struct string args_str)
* if something gets stuck) */
{
__profn("Run exit callbacks");
struct sys_lock lock = sys_mutex_lock_e(G.exit_callbacks_mutex);
struct snc_lock lock = snc_lock_e(&G.exit_callbacks_mutex);
for (struct exit_callback *callback = G.exit_callbacks_head; callback; callback = callback->next) {
callback->func();
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Write window settings to file */
@ -360,5 +358,5 @@ void sys_app_entry(struct string args_str)
void app_exit(void)
{
sync_flag_set(&G.exit_sf);
snc_counter_add(&G.exit_fence, 1);
}

View File

@ -5,6 +5,7 @@
#include "arena.h"
#include "util.h"
#include "log.h"
#include "snc.h"
/* ========================== *
* Global state
@ -14,18 +15,18 @@
#define ASSET_LOOKUP_TABLE_CAPACITY (MAX_ASSETS * 4)
GLOBAL struct {
struct sys_mutex *lookup_mutex;
struct snc_mutex lookup_mutex;
struct asset lookup[ASSET_LOOKUP_TABLE_CAPACITY];
u64 num_assets;
struct sys_mutex *store_mutex;
struct snc_mutex store_mutex;
struct arena *store_arena;
#if RTC
/* Array of len `num_assets` pointing into populated entries of `lookup`. */
struct asset *dbg_table[ASSET_LOOKUP_TABLE_CAPACITY];
u64 dbg_table_count;
struct sys_mutex *dbg_table_mutex;
struct snc_mutex dbg_table_mutex;
#endif
} G = ZI, DEBUG_ALIAS(G, G_asset_cache);
@ -35,15 +36,8 @@ GLOBAL struct {
struct asset_cache_startup_receipt asset_cache_startup(void)
{
/* Init lookup */
G.lookup_mutex = sys_mutex_alloc();
/* Init store */
G.store_mutex = sys_mutex_alloc();
G.store_arena = arena_alloc(GIGABYTE(64));
#if RTC
/* Init debug */
G.dbg_table_mutex = sys_mutex_alloc();
#endif
return (struct asset_cache_startup_receipt) { 0 };
}
@ -55,7 +49,7 @@ struct asset_cache_startup_receipt asset_cache_startup(void)
INTERNAL void refresh_dbg_table(void)
{
#if RTC
struct sys_lock lock = sys_mutex_lock_e(G.dbg_table_mutex);
struct snc_lock lock = snc_lock_e(&G.dbg_table_mutex);
MEMZERO_ARRAY(G.dbg_table);
G.dbg_table_count = 0;
for (u64 i = 0; i < countof(G.lookup); ++i) {
@ -64,15 +58,15 @@ INTERNAL void refresh_dbg_table(void)
G.dbg_table[G.dbg_table_count++] = asset;
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
#endif
}
/* Returns first matching slot or first empty slot if not found.
* Check returned slot->hash != 0 for presence. */
INTERNAL struct asset *asset_cache_get_slot_locked(struct sys_lock *lock, struct string key, u64 hash)
INTERNAL struct asset *asset_cache_get_slot_locked(struct snc_lock *lock, struct string key, u64 hash)
{
sys_assert_locked_e_or_s(lock, G.lookup_mutex);
snc_assert_locked_e_or_s(lock, &G.lookup_mutex);
(UNUSED)lock;
u64 index = hash % countof(G.lookup);
@ -119,14 +113,14 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch
/* Lookup */
{
struct sys_lock lock = sys_mutex_lock_s(G.lookup_mutex);
struct snc_lock lock = snc_lock_s(&G.lookup_mutex);
asset = asset_cache_get_slot_locked(&lock, key, hash);
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Insert if not found */
if (!asset->hash) {
struct sys_lock lock = sys_mutex_lock_e(G.lookup_mutex);
struct snc_lock lock = snc_lock_e(&G.lookup_mutex);
/* Re-check asset presence in case it was inserted since lock */
asset = asset_cache_get_slot_locked(&lock, key, hash);
@ -147,8 +141,7 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch
*asset = (struct asset) {
.status = ASSET_STATUS_UNINITIALIZED,
.hash = hash,
.key = key_stored,
.asset_ready_sf = sync_flag_alloc()
.key = key_stored
};
if (is_first_touch) {
*is_first_touch = true;
@ -158,7 +151,7 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch
refresh_dbg_table();
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
return asset;
@ -179,8 +172,7 @@ void asset_cache_mark_ready(struct asset *asset, void *store_data)
{
asset->store_data = store_data;
asset->status = ASSET_STATUS_READY;
WRITE_BARRIER();
sync_flag_set(&asset->asset_ready_sf);
snc_counter_add(&asset->counter, 1);
}
/* ========================== *
@ -189,14 +181,7 @@ void asset_cache_mark_ready(struct asset *asset, void *store_data)
void asset_cache_wait(struct asset *asset)
{
if (asset->status != ASSET_STATUS_READY) {
/* Wait on job */
if (asset->counter) {
sys_counter_wait(asset->counter);
}
/* Wait for asset to be ready */
sync_flag_wait(&asset->asset_ready_sf);
}
snc_counter_wait_gtz(&asset->counter);
}
/* ========================== *
@ -218,7 +203,7 @@ void *asset_cache_get_store_data(struct asset *asset)
/* Asset store should be opened to allocate memory to the store arena */
struct asset_cache_store asset_cache_store_open(void)
{
struct sys_lock lock = sys_mutex_lock_e(G.store_mutex);
struct snc_lock lock = snc_lock_e(&G.store_mutex);
struct asset_cache_store store = {
.lock = lock,
.arena = G.store_arena
@ -228,5 +213,5 @@ struct asset_cache_store asset_cache_store_open(void)
void asset_cache_store_close(struct asset_cache_store *store)
{
sys_mutex_unlock(&store->lock);
snc_unlock(&store->lock);
}

View File

@ -18,11 +18,10 @@ struct asset {
u64 hash;
struct string key;
struct sys_counter *counter;
struct snc_counter counter;
/* Managed via asset_cache_mark_x functions */
enum asset_status status;
struct sync_flag asset_ready_sf;
/* Accessed via asset_cache_get_data */
void *store_data;
@ -32,7 +31,7 @@ struct asset_cache_store {
struct arena *arena;
/* Internal */
struct sys_lock lock;
struct snc_lock lock;
};
struct asset_cache_startup_receipt { i32 _; };

View File

@ -24,7 +24,7 @@ struct font_task_params {
struct font_task_params_store {
struct font_task_params *head_free;
struct arena *arena;
struct sys_mutex *mutex;
struct snc_mutex mutex;
};
/* ========================== *
@ -50,7 +50,6 @@ struct font_startup_receipt font_startup(struct gp_startup_receipt *gp_sr,
(UNUSED)resource_sr;
G.params.arena = arena_alloc(GIGABYTE(64));
G.params.mutex = sys_mutex_alloc();
return (struct font_startup_receipt) { 0 };
}
@ -63,24 +62,24 @@ INTERNAL struct font_task_params *font_task_params_alloc(void)
{
struct font_task_params *p = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.params.mutex);
struct snc_lock lock = snc_lock_e(&G.params.mutex);
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push(G.params.arena, struct font_task_params);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
return p;
}
INTERNAL void font_task_params_release(struct font_task_params *p)
{
struct sys_lock lock = sys_mutex_lock_e(G.params.mutex);
struct snc_lock lock = snc_lock_e(&G.params.mutex);
p->next_free = G.params.head_free;
G.params.head_free = p;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -188,10 +187,7 @@ struct asset *font_load_asset(struct string path, f32 point_size, b32 wait)
/* Push task */
asset_cache_mark_loading(asset);
if (wait) {
asset->counter = sys_counter_alloc();
}
sys_run(1, font_load_asset_job, params, SYS_PRIORITY_BACKGROUND, asset->counter);
sys_run(1, font_load_asset_job, params, SYS_PRIORITY_BACKGROUND, NULL);
if (wait) {
asset_cache_wait(asset);
}

View File

@ -11,6 +11,7 @@
#include "rand.h"
#include "sprite.h"
#include "gstat.h"
#include "snc.h"
/* Include common shader types */
#define SH_CPU 1
@ -132,7 +133,7 @@ struct command_queue {
ID3D12CommandQueue *cq;
struct arena *arena;
struct sys_mutex *submit_fence_mutex;
struct snc_mutex submit_fence_mutex;
u64 submit_fence_target;
ID3D12Fence *submit_fence;
@ -146,7 +147,7 @@ struct command_queue {
struct command_list_pool {
struct command_queue *cq;
struct arena *arena;
struct sys_mutex *mutex;
struct snc_mutex mutex;
struct command_list *first_submitted_command_list;
struct command_list *last_submitted_command_list;
};
@ -156,7 +157,7 @@ struct command_list {
struct command_list_pool *pool;
struct ID3D12CommandAllocator *ca;
struct ID3D12GraphicsCommandList *cl;
struct sys_lock global_record_lock;
struct snc_lock global_record_lock;
struct command_descriptor_heap *first_command_descriptor_heap;
struct command_buffer *first_command_buffer;
@ -241,7 +242,7 @@ struct swapchain {
struct cpu_descriptor_heap {
enum D3D12_DESCRIPTOR_HEAP_TYPE type;
struct arena *arena;
struct sys_mutex *mutex;
struct snc_mutex mutex;
u32 descriptor_size;
u32 num_descriptors_reserved;
@ -270,23 +271,23 @@ struct fenced_release_data {
GLOBAL struct {
/* Descriptor heaps pool */
struct sys_mutex *command_descriptor_heaps_mutex;
struct snc_mutex command_descriptor_heaps_mutex;
struct arena *command_descriptor_heaps_arena;
struct command_descriptor_heap *first_submitted_command_descriptor_heap;
struct command_descriptor_heap *last_submitted_command_descriptor_heap;
/* Command buffers pool */
struct sys_mutex *command_buffers_mutex;
struct snc_mutex command_buffers_mutex;
struct arena *command_buffers_arena;
struct dict *command_buffers_dict;
/* Resources pool */
struct sys_mutex *resources_mutex;
struct snc_mutex resources_mutex;
struct arena *resources_arena;
struct dx12_resource *first_free_resource;
/* Pipeline cache */
struct sys_mutex *pipelines_mutex;
struct snc_mutex pipelines_mutex;
struct arena *pipelines_arena;
struct dict *pipeline_descs;
struct dict *top_pipelines; /* Latest pipelines */
@ -294,7 +295,7 @@ GLOBAL struct {
struct pipeline_scope *first_free_pipeline_scope;
/* Fenced release queue */
struct sys_mutex *fenced_releases_mutex;
struct snc_mutex fenced_releases_mutex;
struct arena *fenced_releases_arena;
u64 fenced_release_targets[DX12_NUM_QUEUES];
@ -316,8 +317,8 @@ GLOBAL struct {
struct cpu_descriptor_heap *rtv_heap;
/* Command queues */
struct sys_mutex *global_command_list_record_mutex;
struct sys_mutex *global_submit_mutex;
struct snc_mutex global_command_list_record_mutex;
struct snc_mutex global_submit_mutex;
struct command_queue *command_queues[DX12_NUM_QUEUES];
/* Swapchain */
@ -352,27 +353,22 @@ struct gp_startup_receipt gp_startup(void)
__prof;
/* Initialize command descriptor heaps pool */
G.command_descriptor_heaps_mutex = sys_mutex_alloc();
G.command_descriptor_heaps_arena = arena_alloc(GIGABYTE(64));
/* Initialize command buffers pool */
G.command_buffers_mutex = sys_mutex_alloc();
G.command_buffers_arena = arena_alloc(GIGABYTE(64));
G.command_buffers_dict = dict_init(G.command_buffers_arena, 4096);
/* Initialize resources pool */
G.resources_mutex = sys_mutex_alloc();
G.resources_arena = arena_alloc(GIGABYTE(64));
/* Initialize pipeline cache */
G.pipelines_mutex = sys_mutex_alloc();
G.pipelines_arena = arena_alloc(GIGABYTE(64));
G.pipeline_descs = dict_init(G.pipelines_arena, 1024);
G.top_pipelines = dict_init(G.pipelines_arena, 1024);
G.top_successful_pipelines = dict_init(G.pipelines_arena, 1024);
/* Initialize fenced releases queue */
G.fenced_releases_mutex = sys_mutex_alloc();
G.fenced_releases_arena = arena_alloc(GIGABYTE(64));
/* Initialize dx12 */
@ -589,8 +585,6 @@ INTERNAL void dx12_init_objects(void)
G.rtv_heap = cpu_descriptor_heap_alloc(D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
/* Create command queues */
G.global_command_list_record_mutex = sys_mutex_alloc();
G.global_submit_mutex = sys_mutex_alloc();
for (u32 i = 0; i < DX12_NUM_QUEUES; ++i) {
if (i == DX12_QUEUE_DIRECT) {
G.command_queues[i] = command_queue_alloc(D3D12_COMMAND_LIST_TYPE_DIRECT, D3D12_COMMAND_QUEUE_PRIORITY_NORMAL, LIT("Direct queue"));
@ -684,7 +678,7 @@ struct dx12_include_handler {
ID3DInclude d3d_handler;
ID3DIncludeVtbl vtbl;
struct pipeline *pipeline;
struct sys_mutex *pipeline_mutex;
struct snc_mutex pipeline_mutex;
u64 num_open_resources;
struct resource open_resources[1024];
};
@ -703,12 +697,12 @@ INTERNAL HRESULT dx12_include_open(ID3DInclude *d3d_handler, D3D_INCLUDE_TYPE in
sys_panic(LIT("Dx12 include handler resource overflow"));
}
struct sys_lock lock = sys_mutex_lock_e(handler->pipeline_mutex);
struct snc_lock lock = snc_lock_e(&handler->pipeline_mutex);
{
struct pipeline *pipeline = handler->pipeline;
dict_set(pipeline->arena, pipeline->dependencies, hash, 1);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
struct resource *res = &handler->open_resources[handler->num_open_resources++];
*res = resource_open(name);
@ -743,7 +737,6 @@ INTERNAL struct dx12_include_handler *dx12_include_handler_alloc(struct arena *a
handler->vtbl.Open = dx12_include_open;
handler->vtbl.Close = dx12_include_close;
handler->pipeline = pipeline;
handler->pipeline_mutex = sys_mutex_alloc();
return handler;
}
@ -755,7 +748,6 @@ INTERNAL void dx12_include_handler_release(struct dx12_include_handler *handler)
resource_close(res);
}
handler->num_open_resources = 0;
sys_mutex_release(handler->pipeline_mutex);
}
enum shader_compile_job_kind {
@ -927,12 +919,9 @@ INTERNAL SYS_JOB_DEF(pipeline_init_job, job)
if (success) {
struct shader_compile_job_param *params[] = { &vs, &ps };
struct shader_compile_job_sig comp_sig = { .params = params };
struct sys_counter *counter = sys_counter_alloc();
{
sys_run(countof(params), shader_compile_job, &comp_sig, SYS_PRIORITY_HIGH, counter);
sys_counter_wait(counter);
}
sys_counter_release(counter);
struct snc_counter counter = ZI;
sys_run(countof(params), shader_compile_job, &comp_sig, SYS_PRIORITY_HIGH, &counter);
snc_counter_wait_ltez(&counter);
success = vs.success && ps.success;
}
@ -1124,12 +1113,9 @@ INTERNAL void pipeline_alloc(u64 num_pipelines, struct pipeline_desc *descs_in,
{
__prof;
struct pipeline_init_job_sig sig = { .descs_in = descs_in, .pipelines_out = pipelines_out };
struct sys_counter *counter = sys_counter_alloc();
{
sys_run(num_pipelines, pipeline_init_job, &sig, SYS_PRIORITY_HIGH, counter);
sys_counter_wait(counter);
}
sys_counter_release(counter);
struct snc_counter counter = ZI;
sys_run(num_pipelines, pipeline_init_job, &sig, SYS_PRIORITY_HIGH, &counter);
snc_counter_wait_ltez(&counter);
}
INTERNAL void pipeline_release_now(struct pipeline *pipeline)
@ -1150,12 +1136,12 @@ INTERNAL struct pipeline_scope *pipeline_scope_begin(void)
__prof;
struct pipeline_scope *scope = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.pipelines_mutex);
struct snc_lock lock = snc_lock_e(&G.pipelines_mutex);
if (G.first_free_pipeline_scope) {
scope = G.first_free_pipeline_scope;
G.first_free_pipeline_scope = scope->next_free;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
struct arena *arena = NULL;
if (scope) {
@ -1173,7 +1159,7 @@ INTERNAL struct pipeline_scope *pipeline_scope_begin(void)
INTERNAL void pipeline_scope_end(struct pipeline_scope *scope)
{
__prof;
struct sys_lock lock = sys_mutex_lock_e(G.pipelines_mutex);
struct snc_lock lock = snc_lock_e(&G.pipelines_mutex);
{
for (struct dict_entry *entry = scope->refs->first; entry; entry = entry->next) {
struct pipeline *pipeline = (struct pipeline *)entry->value;
@ -1184,7 +1170,7 @@ INTERNAL void pipeline_scope_end(struct pipeline_scope *scope)
scope->next_free = G.first_free_pipeline_scope;
G.first_free_pipeline_scope = scope;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
INTERNAL READONLY struct pipeline g_nil_pipeline = ZI;
@ -1199,12 +1185,12 @@ INTERNAL struct pipeline *pipeline_from_name(struct pipeline_scope *scope, struc
res = tmp;
} else {
{
struct sys_lock lock = sys_mutex_lock_e(G.pipelines_mutex);
struct snc_lock lock = snc_lock_e(&G.pipelines_mutex);
tmp = dict_get(G.top_successful_pipelines, hash);
if (tmp) {
++tmp->refcount;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
if (tmp) {
dict_set(scope->arena, scope->refs, hash, (u64)tmp);
@ -1218,7 +1204,7 @@ INTERNAL struct pipeline *pipeline_from_name(struct pipeline_scope *scope, struc
INTERNAL void pipeline_register(u64 num_pipelines, struct pipeline **pipelines)
{
__prof;
struct sys_lock lock = sys_mutex_lock_e(G.pipelines_mutex);
struct snc_lock lock = snc_lock_e(&G.pipelines_mutex);
{
for (u64 i = 0; i < num_pipelines; ++i) {
struct pipeline *pipeline = pipelines[i];
@ -1243,7 +1229,7 @@ INTERNAL void pipeline_register(u64 num_pipelines, struct pipeline **pipelines)
}
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
#if RESOURCE_RELOADING
@ -1257,7 +1243,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(pipeline_resource_watch_callback, name
u32 num_pipelines = 0;
struct pipeline_desc *pipeline_descs = arena_push_dry(scratch.arena, struct pipeline_desc);
{
struct sys_lock lock = sys_mutex_lock_s(G.pipelines_mutex);
struct snc_lock lock = snc_lock_s(&G.pipelines_mutex);
for (struct dict_entry *entry = G.top_pipelines->first; entry; entry = entry->next) {
struct pipeline *pipeline = (struct pipeline *)entry->value;
if (dict_get(pipeline->dependencies, hash) == 1) {
@ -1266,7 +1252,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(pipeline_resource_watch_callback, name
++num_pipelines;
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Recompile dirty pipelines */
@ -1274,7 +1260,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(pipeline_resource_watch_callback, name
struct pipeline **pipelines = arena_push_array(scratch.arena, struct pipeline *, num_pipelines);
pipeline_alloc(num_pipelines, pipeline_descs, pipelines);
{
struct sys_lock lock = sys_mutex_lock_s(G.pipelines_mutex);
struct snc_lock lock = snc_lock_s(&G.pipelines_mutex);
for (u32 i = 0; i < num_pipelines; ++i) {
struct pipeline *pipeline = pipelines[i];
if (pipeline->success) {
@ -1295,7 +1281,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(pipeline_resource_watch_callback, name
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
pipeline_register(num_pipelines, pipelines);
}
@ -1315,7 +1301,7 @@ INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
u32 index = 0;
D3D12_CPU_DESCRIPTOR_HANDLE handle = ZI;
{
struct sys_lock lock = sys_mutex_lock_e(dh->mutex);
struct snc_lock lock = snc_lock_e(&dh->mutex);
if (dh->first_free_descriptor) {
d = dh->first_free_descriptor;
dh->first_free_descriptor = d->next_free;
@ -1329,7 +1315,7 @@ INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
index = dh->num_descriptors_reserved++;
handle.ptr = dh->handle.ptr + (index * dh->descriptor_size);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(d);
d->heap = dh;
@ -1341,12 +1327,12 @@ INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
INTERNAL void descriptor_release(struct descriptor *descriptor)
{
struct cpu_descriptor_heap *dh = descriptor->heap;
struct sys_lock lock = sys_mutex_lock_e(dh->mutex);
struct snc_lock lock = snc_lock_e(&dh->mutex);
{
descriptor->next_free = dh->first_free_descriptor;
dh->first_free_descriptor = descriptor;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -1362,7 +1348,6 @@ INTERNAL struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRI
dh = arena_push(arena, struct cpu_descriptor_heap);
dh->arena = arena;
}
dh->mutex = sys_mutex_alloc();
u32 num_descriptors = 0;
u32 descriptor_size = 0;
@ -1551,17 +1536,17 @@ INTERNAL void fenced_release(void *data, enum fenced_release_kind kind)
/* Read current fence target values from command queues */
for (u32 i = 0; i < countof(G.command_queues); ++i) {
struct command_queue *cq = G.command_queues[i];
struct sys_lock lock = sys_mutex_lock_s(cq->submit_fence_mutex);
struct snc_lock lock = snc_lock_s(&cq->submit_fence_mutex);
fr_targets[i] = cq->submit_fence_target;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Push data to release queue */
{
struct sys_lock lock = sys_mutex_lock_e(G.fenced_releases_mutex);
struct snc_lock lock = snc_lock_e(&G.fenced_releases_mutex);
*arena_push(G.fenced_releases_arena, struct fenced_release_data) = fr;
MEMCPY(G.fenced_release_targets, fr_targets, sizeof(fr_targets));
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Wake evictor */
@ -1585,14 +1570,14 @@ INTERNAL struct dx12_resource *dx12_resource_alloc(D3D12_HEAP_PROPERTIES heap_pr
__prof;
struct dx12_resource *r = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.resources_mutex);
struct snc_lock lock = snc_lock_e(&G.resources_mutex);
if (G.first_free_resource) {
r = G.first_free_resource;
G.first_free_resource = r->next_free;
} else {
r = arena_push_no_zero(G.resources_arena, struct dx12_resource);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(r);
@ -1658,10 +1643,10 @@ INTERNAL void dx12_resource_release_now(struct dx12_resource *t)
ID3D12Resource_Release(t->resource);
/* Add to free list */
struct sys_lock lock = sys_mutex_lock_e(G.resources_mutex);
struct snc_lock lock = snc_lock_e(&G.resources_mutex);
t->next_free = G.first_free_resource;
G.first_free_resource = t;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
INTERNAL enum D3D12_RESOURCE_STATES dx12_resource_barrier(ID3D12GraphicsCommandList *cl, struct dx12_resource *resource, enum D3D12_RESOURCE_STATES state)
@ -1727,7 +1712,6 @@ INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE
(UNUSED)dbg_name;
cq->type = type;
cq->submit_fence_mutex = sys_mutex_alloc();
cq->cl_pool = command_list_pool_alloc(cq);
return cq;
}
@ -1753,7 +1737,6 @@ INTERNAL struct command_list_pool *command_list_pool_alloc(struct command_queue
pool->arena = arena;
}
pool->cq = cq;
pool->mutex = sys_mutex_alloc();
return pool;
}
@ -1767,7 +1750,7 @@ INTERNAL struct command_list *command_list_open(struct command_list_pool *pool)
struct ID3D12GraphicsCommandList *old_cl = NULL;
struct ID3D12CommandAllocator *old_ca = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(pool->mutex);
struct snc_lock lock = snc_lock_e(&pool->mutex);
/* Find first command list ready for reuse */
for (struct command_list *tmp = pool->first_submitted_command_list; tmp; tmp = tmp->next_submitted) {
if (completed_fence_value >= tmp->submitted_fence_target) {
@ -1794,12 +1777,12 @@ INTERNAL struct command_list *command_list_open(struct command_list_pool *pool)
} else {
cl = arena_push_no_zero(pool->arena, struct command_list);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(cl);
cl->cq = cq;
cl->pool = pool;
cl->global_record_lock = sys_mutex_lock_s(G.global_command_list_record_mutex);
cl->global_record_lock = snc_lock_s(&G.global_command_list_record_mutex);
HRESULT hr = 0;
if (old_cl) {
@ -1857,20 +1840,20 @@ INTERNAL u64 command_list_close(struct command_list *cl)
u64 submit_fence_target = 0;
{
__profn("Execute");
struct sys_lock submit_lock = sys_mutex_lock_s(G.global_submit_mutex);
struct sys_lock fence_lock = sys_mutex_lock_e(cq->submit_fence_mutex);
struct snc_lock submit_lock = snc_lock_s(&G.global_submit_mutex);
struct snc_lock fence_lock = snc_lock_e(&cq->submit_fence_mutex);
{
submit_fence_target = ++cq->submit_fence_target;
ID3D12CommandQueue_ExecuteCommandLists(cq->cq, 1, (ID3D12CommandList **)&cl->cl);
ID3D12CommandQueue_Signal(cq->cq, cq->submit_fence, submit_fence_target);
}
sys_mutex_unlock(&fence_lock);
sys_mutex_unlock(&submit_lock);
snc_unlock(&fence_lock);
snc_unlock(&submit_lock);
}
/* Add descriptor heaps to submitted list */
{
struct sys_lock lock = sys_mutex_lock_e(G.command_descriptor_heaps_mutex);
struct snc_lock lock = snc_lock_e(&G.command_descriptor_heaps_mutex);
for (struct command_descriptor_heap *cdh = cl->first_command_descriptor_heap; cdh; cdh = cdh->next_in_command_list) {
cdh->submitted_cq = cq;
cdh->submitted_fence_target = submit_fence_target;
@ -1881,12 +1864,12 @@ INTERNAL u64 command_list_close(struct command_list *cl)
}
G.last_submitted_command_descriptor_heap = cdh;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Add command buffers to submitted list */
{
struct sys_lock lock = sys_mutex_lock_e(G.command_buffers_mutex);
struct snc_lock lock = snc_lock_e(&G.command_buffers_mutex);
for (struct command_buffer *cb = cl->first_command_buffer; cb; cb = cb->next_in_command_list) {
struct command_buffer_group *group = cb->group;
cb->submitted_cq = cq;
@ -1898,21 +1881,21 @@ INTERNAL u64 command_list_close(struct command_list *cl)
}
group->last_submitted = cb;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Add command list to pool submitted list */
sys_mutex_unlock(&cl->global_record_lock);
snc_unlock(&cl->global_record_lock);
cl->submitted_fence_target = submit_fence_target;
{
struct sys_lock lock = sys_mutex_lock_e(pool->mutex);
struct snc_lock lock = snc_lock_e(&pool->mutex);
if (pool->last_submitted_command_list) {
pool->last_submitted_command_list->next_submitted = cl;
} else {
pool->first_submitted_command_list = cl;
}
pool->last_submitted_command_list = cl;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
return submit_fence_target;
@ -1933,7 +1916,7 @@ INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struc
D3D12_CPU_DESCRIPTOR_HANDLE old_cpu_handle = ZI;
D3D12_GPU_DESCRIPTOR_HANDLE old_gpu_handle = ZI;
{
struct sys_lock lock = sys_mutex_lock_e(G.command_descriptor_heaps_mutex);
struct snc_lock lock = snc_lock_e(&G.command_descriptor_heaps_mutex);
/* Find first heap ready for reuse */
for (struct command_descriptor_heap *tmp = G.first_submitted_command_descriptor_heap; tmp; tmp = tmp->next_submitted) {
/* TODO: Cache completed fence values */
@ -1964,7 +1947,7 @@ INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struc
/* No available heap available for reuse, allocate new */
cdh = arena_push_no_zero(G.command_descriptor_heaps_arena, struct command_descriptor_heap);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(cdh);
@ -1987,9 +1970,9 @@ INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struc
/* Copy CPU heap */
{
struct sys_lock lock = sys_mutex_lock_s(dh_cpu->mutex);
struct snc_lock lock = snc_lock_s(&dh_cpu->mutex);
ID3D12Device_CopyDescriptorsSimple(G.device, dh_cpu->num_descriptors_reserved, cdh->cpu_handle, dh_cpu->handle, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Insert into command list */
@ -2037,7 +2020,7 @@ INTERNAL struct command_buffer *command_list_push_buffer(struct command_list *cl
struct command_buffer *cb = NULL;
struct dx12_resource *resource = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.command_buffers_mutex);
struct snc_lock lock = snc_lock_e(&G.command_buffers_mutex);
{
u64 group_hash = command_buffer_hash_from_size(size);
@ -2077,7 +2060,7 @@ INTERNAL struct command_buffer *command_list_push_buffer(struct command_list *cl
/* Allocate new */
cb = arena_push_no_zero(G.command_buffers_arena, struct command_buffer);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(cb);
cb->group = cb_group;
@ -2596,9 +2579,9 @@ INTERNAL struct swapchain_buffer *update_swapchain(struct swapchain *swapchain,
/* Lock direct queue submissions (in case any write to backbuffer) */
/* TODO: Less overkill approach - Only flush present_blit since we know it's the only operation targeting backbuffer */
struct sys_lock lock = sys_mutex_lock_e(cq->submit_fence_mutex);
struct snc_lock lock = snc_lock_e(&cq->submit_fence_mutex);
//DEBUGBREAKABLE;
//struct sys_lock lock = sys_mutex_lock_e(G.global_command_list_record_mutex);
//struct snc_lock lock = snc_lock_e(&G.global_command_list_record_mutex);
{
/* Flush direct queue */
//ID3D12CommandQueue_Signal(cq->cq, cq->submit_fence, ++cq->submit_fence_target);
@ -2623,7 +2606,7 @@ INTERNAL struct swapchain_buffer *update_swapchain(struct swapchain *swapchain,
sys_panic(LIT("Failed to resize swapchain"));
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
} else {
/* Create swapchain1 */
IDXGISwapChain1 *swapchain1 = NULL;
@ -2816,12 +2799,12 @@ void gp_present(struct sys_window *window, struct v2i32 backresolution, struct g
{
__profn("Mark queue frames");
/* Lock because frame marks shouldn't occur while command lists are recording */
struct sys_lock lock = sys_mutex_lock_e(G.global_command_list_record_mutex);
struct snc_lock lock = snc_lock_e(&G.global_command_list_record_mutex);
for (u32 i = 0; i < countof(G.command_queues); ++i) {
struct command_queue *cq = G.command_queues[i];
__prof_dx12_new_frame(cq->prof);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
{
__profn("Collect queues");
@ -2863,13 +2846,13 @@ INTERNAL SYS_THREAD_DEF(evictor_thread_entry_point, arg)
struct fenced_release_data *fenced_releases = NULL;
{
__profn("Copy queued releases");
struct sys_lock lock = sys_mutex_lock_e(G.fenced_releases_mutex);
struct snc_lock lock = snc_lock_e(&G.fenced_releases_mutex);
num_fenced_releases = G.fenced_releases_arena->pos / sizeof(struct fenced_release_data);
fenced_releases = arena_push_array_no_zero(temp.arena, struct fenced_release_data, num_fenced_releases);
MEMCPY(fenced_releases, arena_base(G.fenced_releases_arena), G.fenced_releases_arena->pos);
arena_reset(G.fenced_releases_arena);
MEMCPY(targets, G.fenced_release_targets, sizeof(targets));
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Wait until fences reach target */

View File

@ -198,7 +198,6 @@ struct host *host_alloc(u16 listen_port)
host->sock = sock_alloc(listen_port, MEGABYTE(2), MEGABYTE(2));
host->rcv_buffer_write_mutex = sys_mutex_alloc();
host->receiver_thread = sys_thread_alloc(&host_receiver_thread_entry_point, host, LIT("Host receiver"), PROF_THREAD_GROUP_IO);
return host;
@ -211,7 +210,6 @@ void host_release(struct host *host)
while (!sys_thread_try_release(host->receiver_thread, 0.001f)) {
sock_wake(host->sock);
}
sys_mutex_release(host->rcv_buffer_write_mutex);
sock_release(host->sock);
@ -659,11 +657,11 @@ struct host_event_list host_update_begin(struct arena *arena, struct host *host)
/* Swap read & write rcv buffers */
{
struct sys_lock lock = sys_mutex_lock_e(host->rcv_buffer_write_mutex);
struct snc_lock lock = snc_lock_e(&host->rcv_buffer_write_mutex);
struct host_rcv_buffer *swp = host->rcv_buffer_read;
host->rcv_buffer_read = host->rcv_buffer_write;
host->rcv_buffer_write = swp;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Read incoming packets */
@ -1082,7 +1080,7 @@ INTERNAL SYS_THREAD_DEF(host_receiver_thread_entry_point, arg)
struct sock_address address = res.address;
struct string data = res.data;
if (data.len > 0) {
struct sys_lock lock = sys_mutex_lock_e(host->rcv_buffer_write_mutex);
struct snc_lock lock = snc_lock_e(&host->rcv_buffer_write_mutex);
{
struct host_rcv_buffer *rcv_buffer = host->rcv_buffer_write;
struct host_rcv_packet *packet = arena_push(rcv_buffer->arena, struct host_rcv_packet);
@ -1095,7 +1093,7 @@ INTERNAL SYS_THREAD_DEF(host_receiver_thread_entry_point, arg)
}
rcv_buffer->last_packet = packet;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
}
}

View File

@ -3,6 +3,7 @@
#include "sys.h"
#include "sock.h"
#include "snc.h"
#define HOST_CHANNEL_ID_NIL (struct host_channel_id) { .gen = 0, .idx = 0 }
#define HOST_CHANNEL_ID_ALL (struct host_channel_id) { .gen = U32_MAX, .idx = U32_MAX }
@ -90,7 +91,7 @@ struct host {
u64 num_msg_assembler_lookup_bins;
/* Double buffer for incoming data */
struct sys_mutex *rcv_buffer_write_mutex;
struct snc_mutex rcv_buffer_write_mutex;
struct host_rcv_buffer *rcv_buffer_read;
struct host_rcv_buffer *rcv_buffer_write;

View File

@ -2,6 +2,7 @@
#include "log.h"
#include "string.h"
#include "atomic.h"
#include "snc.h"
struct log_event_callback {
log_event_callback_func *func;
@ -16,7 +17,7 @@ struct log_event_callback {
GLOBAL struct {
struct atomic_i32 initialized;
struct sys_mutex *callbacks_mutex;
struct snc_mutex callbacks_mutex;
struct arena *callbacks_arena;
struct log_event_callback *first_callback;
struct log_event_callback *last_callback;
@ -64,7 +65,6 @@ GLOBAL READONLY struct log_level_settings g_log_level_settings[LOG_LEVEL_COUNT]
void log_startup(struct string logfile_path)
{
G.callbacks_arena = arena_alloc(MEGABYTE(8));
G.callbacks_mutex = sys_mutex_alloc();
if (logfile_path.len > 0) {
/* Create / wipe log file */
sys_file_close(sys_file_open_write(logfile_path));
@ -84,7 +84,7 @@ void log_startup(struct string logfile_path)
void log_register_callback(log_event_callback_func *func, i32 level)
{
if (!atomic_i32_fetch(&G.initialized)) { return; }
struct sys_lock lock = sys_mutex_lock_e(G.callbacks_mutex);
struct snc_lock lock = snc_lock_e(&G.callbacks_mutex);
{
struct log_event_callback *callback = arena_push(G.callbacks_arena, struct log_event_callback);
callback->func = func;
@ -96,7 +96,7 @@ void log_register_callback(log_event_callback_func *func, i32 level)
}
G.last_callback = callback;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -213,14 +213,14 @@ void _log(i32 level, struct string msg)
event.line = line;
#endif
{
struct sys_lock lock = sys_mutex_lock_s(G.callbacks_mutex);
struct snc_lock lock = snc_lock_s(&G.callbacks_mutex);
for (struct log_event_callback *callback = G.first_callback; callback; callback = callback->next) {
if (level <= callback->level) {
__profn("Run log callback");
callback->func(event);
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
scratch_end(scratch);

View File

@ -3,6 +3,7 @@
#include "sound.h"
#include "sys.h"
#include "math.h"
#include "snc.h"
/* TODO: Cap max sounds playing. */
@ -51,7 +52,7 @@ struct track {
};
GLOBAL struct {
struct sys_mutex *mutex;
struct snc_mutex mutex;
/* Listener */
struct v2 listener_pos;
@ -72,7 +73,6 @@ GLOBAL struct {
struct mixer_startup_receipt mixer_startup(void)
{
G.track_arena = arena_alloc(GIGABYTE(64));
G.mutex = sys_mutex_alloc();
G.listener_pos = V2(0, 0);
G.listener_dir = V2(0, -1);
@ -101,9 +101,9 @@ INTERNAL struct track *track_from_handle(struct mixer_track_handle handle)
}
}
INTERNAL struct track *track_alloc_locked(struct sys_lock *lock, struct sound *sound)
INTERNAL struct track *track_alloc_locked(struct snc_lock *lock, struct sound *sound)
{
sys_assert_locked_e(lock, G.mutex);
snc_assert_locked_e(lock, &G.mutex);
(UNUSED)lock;
struct track *track = NULL;
@ -140,9 +140,9 @@ INTERNAL struct track *track_alloc_locked(struct sys_lock *lock, struct sound *s
return track;
}
INTERNAL void track_release_locked(struct sys_lock *lock, struct track *track)
INTERNAL void track_release_locked(struct snc_lock *lock, struct track *track)
{
sys_assert_locked_e(lock, G.mutex);
snc_assert_locked_e(lock, &G.mutex);
(UNUSED)lock;
/* Remove from playing list */
@ -188,12 +188,12 @@ struct mixer_track_handle mixer_play_ex(struct sound *sound, struct mixer_desc d
{
struct track *track;
{
struct sys_lock lock = sys_mutex_lock_e(G.mutex);
struct snc_lock lock = snc_lock_e(&G.mutex);
{
track = track_alloc_locked(&lock, sound);
track->desc = desc;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
return track_to_handle(track);
}
@ -206,7 +206,7 @@ struct mixer_desc mixer_track_get(struct mixer_track_handle handle)
struct track *track = track_from_handle(handle);
if (track) {
/* TODO: Only lock mutex on track itself or something */
struct sys_lock lock = sys_mutex_lock_e(G.mutex);
struct snc_lock lock = snc_lock_e(&G.mutex);
{
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
@ -214,7 +214,7 @@ struct mixer_desc mixer_track_get(struct mixer_track_handle handle)
res = track->desc;
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
return res;
@ -226,7 +226,7 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc)
struct track *track = track_from_handle(handle);
if (track) {
/* TODO: Only lock mutex on track itself or something */
struct sys_lock lock = sys_mutex_lock_e(G.mutex);
struct snc_lock lock = snc_lock_e(&G.mutex);
{
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
@ -234,18 +234,18 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc)
track->desc = desc;
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
}
void mixer_set_listener(struct v2 pos, struct v2 dir)
{
struct sys_lock lock = sys_mutex_lock_e(G.mutex);
struct snc_lock lock = snc_lock_e(&G.mutex);
{
G.listener_pos = pos;
G.listener_dir = v2_norm(dir);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -281,7 +281,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
struct mix **mixes = NULL;
u64 mixes_count = 0;
{
struct sys_lock lock = sys_mutex_lock_e(G.mutex);
struct snc_lock lock = snc_lock_e(&G.mutex);
/* Read listener info */
listener_pos = G.listener_pos;
@ -296,7 +296,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
mixes[mixes_count++] = mix;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
for (u64 mix_index = 0; mix_index < mixes_count; ++mix_index) {
@ -469,7 +469,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
{
__profn("Update track effect data");
struct sys_lock lock = sys_mutex_lock_e(G.mutex);
struct snc_lock lock = snc_lock_e(&G.mutex);
for (u64 i = 0; i < mixes_count; ++i) {
struct mix *mix = mixes[i];
struct track *track = track_from_handle(mix->track_handle);
@ -480,7 +480,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
}
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
scratch_end(scratch);

View File

@ -29,12 +29,12 @@ GLOBAL struct {
struct sys_watch *watch;
struct atomic_i32 watch_shutdown;
struct sys_mutex *watch_dispatcher_mutex;
struct snc_mutex watch_dispatcher_mutex;
struct arena *watch_dispatcher_info_arena;
struct sys_watch_info_list watch_dispatcher_info_list;
struct sys_condition_variable *watch_dispatcher_cv;
struct snc_cv watch_dispatcher_cv;
struct sys_mutex *watch_callbacks_mutex;
struct snc_mutex watch_callbacks_mutex;
resource_watch_callback *watch_callbacks[64];
u64 num_watch_callbacks;
#endif
@ -69,11 +69,8 @@ struct resource_startup_receipt resource_startup(void)
#if RESOURCE_RELOADING
G.watch = sys_watch_alloc(LIT("res"));
G.watch_callbacks_mutex = sys_mutex_alloc();
G.watch_dispatcher_mutex = sys_mutex_alloc();
G.watch_dispatcher_info_arena = arena_alloc(GIGABYTE(64));
G.watch_dispatcher_cv = sys_condition_variable_alloc();
app_register_exit_callback(&resource_shutdown);
G.resource_watch_monitor_thread = sys_thread_alloc(resource_watch_monitor_thread_entry_point, NULL, LIT("Resource watch monitor"), PROF_THREAD_GROUP_IO);
@ -162,7 +159,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(resource_shutdown)
__prof;
atomic_i32_fetch_set(&G.watch_shutdown, 1);
sys_condition_variable_broadcast(G.watch_dispatcher_cv);
snc_cv_broadcast(&G.watch_dispatcher_cv);
sys_watch_wake(G.watch);
sys_thread_wait_release(G.resource_watch_dispatch_thread);
@ -171,7 +168,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(resource_shutdown)
void resource_register_watch_callback(resource_watch_callback *callback)
{
struct sys_lock lock = sys_mutex_lock_e(G.watch_callbacks_mutex);
struct snc_lock lock = snc_lock_e(&G.watch_callbacks_mutex);
{
if (G.num_watch_callbacks < countof(G.watch_callbacks)) {
G.watch_callbacks[G.num_watch_callbacks++] = callback;
@ -179,7 +176,7 @@ void resource_register_watch_callback(resource_watch_callback *callback)
sys_panic(LIT("Max resource watch callbacks reached"));
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
INTERNAL SYS_THREAD_DEF(resource_watch_monitor_thread_entry_point, _)
@ -191,7 +188,7 @@ INTERNAL SYS_THREAD_DEF(resource_watch_monitor_thread_entry_point, _)
struct arena_temp temp = arena_temp_begin(scratch.arena);
struct sys_watch_info_list res = sys_watch_wait(temp.arena, G.watch);
if (res.first && !atomic_i32_fetch(&G.watch_shutdown)) {
struct sys_lock lock = sys_mutex_lock_e(G.watch_dispatcher_mutex);
struct snc_lock lock = snc_lock_e(&G.watch_dispatcher_mutex);
{
struct sys_watch_info_list list_part = sys_watch_info_copy(G.watch_dispatcher_info_arena, res);
if (G.watch_dispatcher_info_list.last) {
@ -202,8 +199,8 @@ INTERNAL SYS_THREAD_DEF(resource_watch_monitor_thread_entry_point, _)
G.watch_dispatcher_info_list = list_part;
}
}
sys_mutex_unlock(&lock);
sys_condition_variable_broadcast(G.watch_dispatcher_cv);
snc_unlock(&lock);
snc_cv_broadcast(&G.watch_dispatcher_cv);
}
arena_temp_end(temp);
}
@ -237,17 +234,17 @@ INTERNAL SYS_THREAD_DEF(resource_watch_dispatcher_thread_entry_point, _)
(UNUSED)_;
struct arena_temp scratch = scratch_begin_no_conflict();
struct sys_lock watch_dispatcher_lock = sys_mutex_lock_e(G.watch_dispatcher_mutex);
struct snc_lock watch_dispatcher_lock = snc_lock_e(&G.watch_dispatcher_mutex);
while (!atomic_i32_fetch(&G.watch_shutdown)) {
sys_condition_variable_wait(G.watch_dispatcher_cv, &watch_dispatcher_lock);
snc_cv_wait(&G.watch_dispatcher_cv, &watch_dispatcher_lock);
if (!atomic_i32_fetch(&G.watch_shutdown) && G.watch_dispatcher_info_arena->pos > 0) {
__profn("Dispatch resource watch callbacks");
/* Unlock and sleep a bit so duplicate events pile up */
{
__profn("Delay");
sys_mutex_unlock(&watch_dispatcher_lock);
snc_unlock(&watch_dispatcher_lock);
sys_sleep(WATCH_DISPATCHER_DELAY_SECONDS);
watch_dispatcher_lock = sys_mutex_lock_e(G.watch_dispatcher_mutex);
watch_dispatcher_lock = snc_lock_e(&G.watch_dispatcher_mutex);
}
if (!atomic_i32_fetch(&G.watch_shutdown)) {
struct arena_temp temp = arena_temp_begin(scratch.arena);
@ -260,7 +257,7 @@ INTERNAL SYS_THREAD_DEF(resource_watch_dispatcher_thread_entry_point, _)
/* Build callbacks array */
u64 num_callbacks = 0;
resource_watch_callback **callbacks = NULL;
struct sys_lock callbacks_lock = sys_mutex_lock_s(G.watch_callbacks_mutex);
struct snc_lock callbacks_lock = snc_lock_s(&G.watch_callbacks_mutex);
{
num_callbacks = G.num_watch_callbacks;
callbacks = arena_push_array_no_zero(temp.arena, resource_watch_callback *, num_callbacks);
@ -268,10 +265,10 @@ INTERNAL SYS_THREAD_DEF(resource_watch_dispatcher_thread_entry_point, _)
callbacks[i] = G.watch_callbacks[i];
}
}
sys_mutex_unlock(&callbacks_lock);
snc_unlock(&callbacks_lock);
/* Unlock and run callbacks */
sys_mutex_unlock(&watch_dispatcher_lock);
snc_unlock(&watch_dispatcher_lock);
{
struct dict *dedup_dict = dict_init(temp.arena, WATCH_DISPATCHER_DEDUP_DICT_BINS);
for (struct sys_watch_info *info = watch_info_list.first; info; info = info->next) {
@ -288,16 +285,13 @@ INTERNAL SYS_THREAD_DEF(resource_watch_dispatcher_thread_entry_point, _)
struct resource_watch_callback_job_sig sig = ZI;
sig.name = info->name;
sig.callbacks = callbacks;
struct sys_counter *counter = sys_counter_alloc();
{
sys_run(num_callbacks, resource_watch_callback_job, &sig, SYS_PRIORITY_BACKGROUND, counter);
sys_counter_wait(counter);
}
sys_counter_release(counter);
struct snc_counter counter = ZI;
sys_run(num_callbacks, resource_watch_callback_job, &sig, SYS_PRIORITY_BACKGROUND, &counter);
snc_counter_wait_ltez(&counter);
}
}
}
watch_dispatcher_lock = sys_mutex_lock_e(G.watch_dispatcher_mutex);
watch_dispatcher_lock = snc_lock_e(&G.watch_dispatcher_mutex);
arena_temp_end(temp);
}

View File

@ -77,6 +77,8 @@ void snc_unlock(struct snc_lock *l)
void snc_cv_wait(struct snc_cv *cv, struct snc_lock *l)
{
u64 old_wake_gen = atomic_u64_fetch(&cv->wake_gen);
struct snc_mutex *mutex = l->mutex;
b32 exclusive = l->exclusive;
u64 wake_gen = old_wake_gen;
{
snc_unlock(l);
@ -85,10 +87,10 @@ void snc_cv_wait(struct snc_cv *cv, struct snc_lock *l)
wake_gen = atomic_u64_fetch(&cv->wake_gen);
} while (wake_gen == old_wake_gen);
sys_wake_all(&cv->wake_gen);
if (l->exclusive) {
*l= snc_lock_e(l->mutex);
if (exclusive) {
*l= snc_lock_e(mutex);
} else {
*l= snc_lock_s(l->mutex);
*l= snc_lock_s(mutex);
}
}
}
@ -98,3 +100,34 @@ void snc_cv_broadcast(struct snc_cv *cv)
atomic_u64_fetch_add_u64(&cv->wake_gen, 1);
sys_wake_all(&cv->wake_gen);
}
/* ========================== *
* Counter
* ========================== */
/* Atomically add `amount` to the counter. Waiters are woken only when
 * the value crosses zero, because both wait predicates used by this
 * module (v <= 0 and v > 0) change truth value exactly at that
 * boundary — any other transition cannot unblock a waiter. */
void snc_counter_add(struct snc_counter *counter, i64 amount)
{
	i64 prev = atomic_i64_fetch_add(&counter->v, amount);
	i64 next = prev + amount;
	b32 was_positive = (prev > 0);
	b32 now_positive = (next > 0);
	if (was_positive != now_positive) {
		/* Sign flipped: some snc_counter_wait_* predicate may now
		 * hold, so wake every thread parked on this address. */
		sys_wake_all(&counter->v);
	}
}
/* Block the calling thread until the counter's value is <= 0 — e.g.
 * until every job tracked by the counter has decremented it back down
 * (used as a completion fence for sys_run job batches elsewhere in
 * this commit). Returns immediately if the value is already <= 0. */
void snc_counter_wait_ltez(struct snc_counter *counter)
{
i64 v = atomic_i64_fetch(&counter->v);
while (v > 0) {
/* Futex-style park on the counter's address with `v` as the
 * expected value; the fetch-then-wait order is what prevents a
 * lost wakeup if snc_counter_add() runs between the two calls.
 * NOTE(review): assumes sys_wait() only sleeps while the memory
 * at the address still equals the expected value — confirm
 * against the sys_wait implementation. */
sys_wait(&counter->v, &v, sizeof(v));
/* Re-read after waking; spurious wakeups simply loop again. */
v = atomic_i64_fetch(&counter->v);
}
}
/* Block the calling thread until the counter's value is > 0 — the
 * inverse of snc_counter_wait_ltez(); used as a one-way gate (e.g.
 * G.exit_fence in app.c waits here until app exit is signalled).
 * Returns immediately if the value is already > 0. */
void snc_counter_wait_gtz(struct snc_counter *counter)
{
i64 v = atomic_i64_fetch(&counter->v);
while (v <= 0) {
/* Futex-style park with `v` as the expected value; fetching
 * before waiting avoids a lost wakeup against a concurrent
 * snc_counter_add(). NOTE(review): assumes sys_wait() rechecks
 * the expected value before sleeping — confirm. */
sys_wait(&counter->v, &v, sizeof(v));
/* Re-read after waking; spurious wakeups simply loop again. */
v = atomic_i64_fetch(&counter->v);
}
}

View File

@ -37,4 +37,20 @@ struct snc_cv {
void snc_cv_wait(struct snc_cv *cv, struct snc_lock *lock);
void snc_cv_broadcast(struct snc_cv *cv);
/* ========================== *
* Fence
* ========================== */
/* ========================== *
* Counter
* ========================== */
/* Embeddable signal-safe counter; zero-initialize (ZI) to use. Threads
 * park on the value's address, so the struct must stay at a stable
 * address while anyone is waiting on it. */
struct snc_counter {
/* Signed count; waiters block on this address futex-style. */
struct atomic_i64 v;
};
/* Atomically add `amount`; wakes all waiters when the value crosses zero. */
void snc_counter_add(struct snc_counter *counter, i64 amount);
/* Block until the value is <= 0 (completion-fence wait). */
void snc_counter_wait_ltez(struct snc_counter *counter);
/* Block until the value is > 0 (signal/gate wait). */
void snc_counter_wait_gtz(struct snc_counter *counter);
#endif

View File

@ -4,6 +4,7 @@
#include "log.h"
#include "string.h"
#include "gstat.h"
#include "snc.h"
#define WIN32_LEAN_AND_MEAN
#define UNICODE
@ -40,7 +41,7 @@ struct win32_sock {
GLOBAL struct {
WSADATA wsa_data;
struct arena *win32_socks_arena;
struct sys_mutex *win32_socks_mutex;
struct snc_mutex win32_socks_mutex;
struct win32_sock *first_free_win32_sock;
} G = ZI, DEBUG_ALIAS(G, G_sock_win32);
@ -53,7 +54,6 @@ struct sock_startup_receipt sock_startup(void)
/* Startup winsock */
WSAStartup(MAKEWORD(2, 2), &G.wsa_data);
G.win32_socks_arena = arena_alloc(GIGABYTE(64));
G.win32_socks_mutex = sys_mutex_alloc();
return (struct sock_startup_receipt) { 0 };
}
@ -296,14 +296,14 @@ INTERNAL struct win32_sock *win32_sock_alloc(void)
{
struct win32_sock *ws = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.win32_socks_mutex);
struct snc_lock lock = snc_lock_e(&G.win32_socks_mutex);
if (G.first_free_win32_sock) {
ws = G.first_free_win32_sock;
G.first_free_win32_sock = ws->next_free;
} else {
ws = arena_push_no_zero(G.win32_socks_arena, struct win32_sock);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(ws);
return ws;
@ -311,10 +311,10 @@ INTERNAL struct win32_sock *win32_sock_alloc(void)
INTERNAL void win32_sock_release(struct win32_sock *ws)
{
struct sys_lock lock = sys_mutex_lock_e(G.win32_socks_mutex);
struct snc_lock lock = snc_lock_e(&G.win32_socks_mutex);
ws->next_free = G.first_free_win32_sock;
G.first_free_win32_sock = ws;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
struct sock *sock_alloc(u16 listen_port, u64 sndbuf_size, u64 rcvbuf_size)

View File

@ -18,7 +18,7 @@ struct sound_task_params {
struct sound_task_params_store {
struct sound_task_params *head_free;
struct arena *arena;
struct sys_mutex *mutex;
struct snc_mutex mutex;
};
/* ========================== *
@ -40,7 +40,6 @@ struct sound_startup_receipt sound_startup(struct asset_cache_startup_receipt *a
(UNUSED)resource_sr;
G.params.arena = arena_alloc(GIGABYTE(64));
G.params.mutex = sys_mutex_alloc();
return (struct sound_startup_receipt) { 0 };
}
@ -53,24 +52,24 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void)
{
struct sound_task_params *p = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.params.mutex);
struct snc_lock lock = snc_lock_e(&G.params.mutex);
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push(G.params.arena, struct sound_task_params);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
return p;
}
INTERNAL void sound_task_params_release(struct sound_task_params *p)
{
struct sys_lock lock = sys_mutex_lock_e(G.params.mutex);
struct snc_lock lock = snc_lock_e(&G.params.mutex);
p->next_free = G.params.head_free;
G.params.head_free = p;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -143,7 +142,6 @@ INTERNAL SYS_JOB_DEF(sound_load_asset_job, job)
asset_cache_store_close(&store);
}
*sound = (struct sound) { 0 };
asset_cache_mark_ready(asset, sound);
}
@ -181,10 +179,7 @@ struct asset *sound_load_asset(struct string path, u32 flags, b32 wait)
/* Push task */
asset_cache_mark_loading(asset);
if (wait) {
asset->counter = sys_counter_alloc();
}
sys_run(1, sound_load_asset_job, params, SYS_PRIORITY_BACKGROUND, asset->counter);
sys_run(1, sound_load_asset_job, params, SYS_PRIORITY_BACKGROUND, &asset->counter);
if (wait) {
asset_cache_wait(asset);
}

View File

@ -10,6 +10,7 @@
#include "gp.h"
#include "math.h"
#include "rand.h"
#include "snc.h"
/* The evictor will begin evicting once cache usage is > threshold.
* It will entries until the budget has shrunk < target. */
@ -89,7 +90,7 @@ struct cache_entry {
};
struct cache_bin {
struct sys_mutex *mutex;
struct snc_mutex mutex;
struct cache_entry *first;
struct cache_entry *last;
};
@ -98,7 +99,7 @@ struct cache {
struct atomic_u64 memory_usage;
struct arena *arena;
struct cache_bin *bins;
struct sys_mutex *entry_pool_mutex;
struct snc_mutex entry_pool_mutex;
struct cache_entry *entry_pool_first_free;
};
@ -140,7 +141,7 @@ GLOBAL struct {
struct cache cache;
/* Load cmds */
struct sys_mutex *load_cmds_mutex;
struct snc_mutex load_cmds_mutex;
struct arena *load_cmds_arena;
struct load_cmd *first_free_load_cmd;
@ -152,8 +153,8 @@ GLOBAL struct {
/* Evictor thread */
struct atomic_i32 evictor_cycle;
b32 evictor_scheduler_shutdown;
struct sys_mutex *evictor_scheduler_mutex;
struct sys_condition_variable *evictor_scheduler_shutdown_cv;
struct snc_mutex evictor_scheduler_mutex;
struct snc_cv evictor_scheduler_shutdown_cv;
struct sys_thread *evictor_scheduler_thread;
} G = ZI, DEBUG_ALIAS(G, G_sprite);
@ -241,21 +242,13 @@ struct sprite_startup_receipt sprite_startup(struct gp_startup_receipt *gp_sr,
}
arena_set_readonly(G.perm_arena);
G.cache.entry_pool_mutex = sys_mutex_alloc();
G.cache.arena = arena_alloc(GIGABYTE(64));
G.cache.bins = arena_push_array(G.cache.arena, struct cache_bin, CACHE_BINS_COUNT);
for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) {
G.cache.bins[i].mutex = sys_mutex_alloc();
}
G.load_cmds_arena = arena_alloc(GIGABYTE(64));
G.load_cmds_mutex = sys_mutex_alloc();
G.scopes_arena = arena_alloc(GIGABYTE(64));
G.evictor_scheduler_mutex = sys_mutex_alloc();
G.evictor_scheduler_shutdown_cv = sys_condition_variable_alloc();
G.evictor_scheduler_thread = sys_thread_alloc(sprite_evictor_scheduler_thread_entry_point, NULL, LIT("Sprite evictor scheduler"), PROF_THREAD_GROUP_EVICTORS);
app_register_exit_callback(&sprite_shutdown);
@ -269,10 +262,10 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sprite_shutdown)
__prof;
/* Signal evictor shutdown */
{
struct sys_lock lock = sys_mutex_lock_e(G.evictor_scheduler_mutex);
struct snc_lock lock = snc_lock_e(&G.evictor_scheduler_mutex);
G.evictor_scheduler_shutdown = true;
sys_condition_variable_broadcast(G.evictor_scheduler_shutdown_cv);
sys_mutex_unlock(&lock);
snc_cv_broadcast(&G.evictor_scheduler_shutdown_cv);
snc_unlock(&lock);
}
sys_thread_wait_release(G.evictor_scheduler_thread);
}
@ -313,14 +306,14 @@ INTERNAL void push_load_job(struct cache_ref ref, struct sprite_tag tag)
{
struct load_cmd *cmd = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.load_cmds_mutex);
struct snc_lock lock = snc_lock_e(&G.load_cmds_mutex);
if (G.first_free_load_cmd) {
cmd = G.first_free_load_cmd;
G.first_free_load_cmd = cmd->next_free;
} else {
cmd = arena_push_no_zero(G.load_cmds_arena, struct load_cmd);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(cmd);
@ -400,7 +393,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
#if RESOURCE_RELOADING
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex);
struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
{
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
if (old_entry != e && old_entry->hash.v == e->hash.v) {
@ -409,7 +402,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
}
e->load_time_ns = sys_time_ns();
}
sys_mutex_unlock(&bin_lock);
snc_unlock(&bin_lock);
#endif
scratch_end(scratch);
@ -721,7 +714,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
#if RESOURCE_RELOADING
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex);
struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
{
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
if (old_entry != e && old_entry->hash.v == e->hash.v) {
@ -730,7 +723,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
}
e->load_time_ns = sys_time_ns();
}
sys_mutex_unlock(&bin_lock);
snc_unlock(&bin_lock);
#endif
scratch_end(scratch);
@ -792,10 +785,10 @@ INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_unsafe(struct sprite_sc
return *slot;
}
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_entry(struct sprite_scope *scope, struct cache_entry *e, struct sys_lock *bin_lock)
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_entry(struct sprite_scope *scope, struct cache_entry *e, struct snc_lock *bin_lock)
{
/* Guaranteed safe if caller has lock on entry's bin, since entry may not have an existing reference and could otherwise be evicted while ensuring this reference */
sys_assert_locked_e_or_s(bin_lock, G.cache.bins[e->hash.v % CACHE_BINS_COUNT].mutex);
snc_assert_locked_e_or_s(bin_lock, &G.cache.bins[e->hash.v % CACHE_BINS_COUNT].mutex);
return scope_ensure_ref_unsafe(scope, e);
}
@ -856,12 +849,12 @@ void sprite_scope_end(struct sprite_scope *scope)
* Cache interface
* ========================== */
INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope, struct cache_entry_hash hash, struct sys_lock *bin_lock)
INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope, struct cache_entry_hash hash, struct snc_lock *bin_lock)
{
struct sprite_scope_cache_ref *scope_ref = NULL;
struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT];
sys_assert_locked_e_or_s(bin_lock, bin->mutex); /* Lock required for iterating bin */
snc_assert_locked_e_or_s(bin_lock, &bin->mutex); /* Lock required for iterating bin */
#if RESOURCE_RELOADING
/* If resource reloading is enabled, then we want to find the
@ -916,16 +909,16 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope
/* Search in cache */
if (!force_new) {
struct sys_lock bin_lock = sys_mutex_lock_s(bin->mutex);
struct snc_lock bin_lock = snc_lock_s(&bin->mutex);
{
scope_ref = cache_lookup(scope, hash, &bin_lock);
}
sys_mutex_unlock(&bin_lock);
snc_unlock(&bin_lock);
}
/* If not in cache, allocate new entry */
if (!scope_ref) {
struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex);
struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
{
/* Search cache one more time in case an entry was allocated between locks */
if (!force_new) {
@ -936,14 +929,14 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope
/* Cache entry still absent, allocate new entry */
struct cache_entry *entry = NULL;
{
struct sys_lock pool_lock = sys_mutex_lock_e(G.cache.entry_pool_mutex);
struct snc_lock pool_lock = snc_lock_e(&G.cache.entry_pool_mutex);
if (G.cache.entry_pool_first_free) {
entry = G.cache.entry_pool_first_free;
G.cache.entry_pool_first_free = entry->next_free;
} else {
entry = arena_push_no_zero(G.cache.arena, struct cache_entry);
}
sys_mutex_unlock(&pool_lock);
snc_unlock(&pool_lock);
}
MEMZERO_STRUCT(entry);
@ -965,7 +958,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope
scope_ref = scope_ensure_ref_from_entry(scope, entry, &bin_lock);
}
}
sys_mutex_unlock(&bin_lock);
snc_unlock(&bin_lock);
}
}
@ -1158,13 +1151,13 @@ INTERNAL SYS_JOB_DEF(sprite_load_job, job)
}
/* Free cmd */
struct sys_lock lock = sys_mutex_lock_e(G.load_cmds_mutex);
struct snc_lock lock = snc_lock_e(&G.load_cmds_mutex);
{
sprite_scope_end(cmd->scope);
cmd->next_free = G.first_free_load_cmd;
G.first_free_load_cmd = cmd;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -1178,11 +1171,11 @@ INTERNAL void reload_if_exists(struct sprite_scope *scope, struct sprite_tag tag
struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT];
struct sprite_scope_cache_ref *existing_ref = NULL;
struct sys_lock bin_lock = sys_mutex_lock_s(bin->mutex);
struct snc_lock bin_lock = snc_lock_s(&bin->mutex);
{
existing_ref = cache_lookup(scope, hash, &bin_lock);
}
sys_mutex_unlock(&bin_lock);
snc_unlock(&bin_lock);
if (existing_ref) {
logf_info("Sprite resource file \"%F\" has changed for sprite [%F].", FMT_STR(tag.path), FMT_HEX(hash.v));
@ -1244,7 +1237,7 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
__profn("Evictor scan");
for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) {
struct cache_bin *bin = &G.cache.bins[i];
struct sys_lock bin_lock = sys_mutex_lock_s(bin->mutex);
struct snc_lock bin_lock = snc_lock_s(&bin->mutex);
{
struct cache_entry *n = bin->first;
while (n) {
@ -1273,7 +1266,7 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
n = n->next_in_bin;
}
}
sys_mutex_unlock(&bin_lock);
snc_unlock(&bin_lock);
}
}
@ -1297,7 +1290,7 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
struct cache_entry *entry = en->cache_entry;
i32 last_ref_cycle = en->last_ref_cycle;
b32 cache_over_budget_target = atomic_u64_fetch(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET;
struct sys_lock bin_lock = sys_mutex_lock_e(bin->mutex);
struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
{
u64 refcount_uncast = atomic_u64_fetch(&entry->refcount_struct);
struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
@ -1328,7 +1321,7 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
stop_evicting = true;
}
}
sys_mutex_unlock(&bin_lock);
snc_unlock(&bin_lock);
}
}
@ -1348,13 +1341,13 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
/* Add evicted nodes to free list */
{
__profn("Evictor free list append");
struct sys_lock pool_lock = sys_mutex_lock_e(G.cache.entry_pool_mutex);
struct snc_lock pool_lock = snc_lock_e(&G.cache.entry_pool_mutex);
for (struct evict_node *en = first_evicted; en; en = en->next_evicted) {
struct cache_entry *n = en->cache_entry;
n->next_free = G.cache.entry_pool_first_free;
G.cache.entry_pool_first_free = n;
}
sys_mutex_unlock(&pool_lock);
snc_unlock(&pool_lock);
}
}
}
@ -1377,19 +1370,19 @@ INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
INTERNAL SYS_THREAD_DEF(sprite_evictor_scheduler_thread_entry_point, arg)
{
(UNUSED)arg;
struct sys_counter *job_counter = sys_counter_alloc();
struct sys_lock evictor_lock = sys_mutex_lock_e(G.evictor_scheduler_mutex);
struct snc_lock evictor_lock = snc_lock_e(&G.evictor_scheduler_mutex);
while (!G.evictor_scheduler_shutdown) {
sys_run(1, sprite_evictor_job, NULL, SYS_PRIORITY_BACKGROUND, job_counter);
sys_counter_wait(job_counter);
struct snc_counter counter = ZI;
sys_run(1, sprite_evictor_job, NULL, SYS_PRIORITY_BACKGROUND, &counter);
snc_counter_wait_ltez(&counter);
/* FIXME: Enable this */
#if 0
sys_condition_variable_wait_time(G.evictor_scheduler_shutdown_cv, &evictor_lock, SECONDS_FROM_NS(EVICTOR_CYCLE_INTERVAL_NS));
snc_cv_wait_time(G.evictor_scheduler_shutdown_cv, &evictor_lock, SECONDS_FROM_NS(EVICTOR_CYCLE_INTERVAL_NS));
#else
sys_mutex_unlock(&evictor_lock);
snc_unlock(&evictor_lock);
sys_sleep(SECONDS_FROM_NS(EVICTOR_CYCLE_INTERVAL_NS));
evictor_lock = sys_mutex_lock_e(G.evictor_scheduler_mutex);
evictor_lock = snc_lock_e(&G.evictor_scheduler_mutex);
#endif
}
sys_mutex_unlock(&evictor_lock);
snc_unlock(&evictor_lock);
}

View File

@ -358,38 +358,6 @@ void sys_window_cursor_hide(struct sys_window *sys_window);
void sys_window_cursor_enable_clip(struct sys_window *sys_window, struct rect bounds);
void sys_window_cursor_disable_clip(struct sys_window *sys_window);
/* ========================== *
* Mutex
* ========================== */
struct sys_lock {
struct sys_mutex *mutex;
b32 exclusive;
};
struct sys_mutex *sys_mutex_alloc(void);
void sys_mutex_release(struct sys_mutex *mutex);
struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex);
struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex);
void sys_mutex_unlock(struct sys_lock *lock);
#if RTC
void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex);
void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex);
#else
# define sys_assert_locked_e(l, m) (UNUSED)l
# define sys_assert_locked_e_or_s(l, m) (UNUSED)l
#endif
/* ========================== *
* Condition variable
* ========================== */
struct sys_condition_variable *sys_condition_variable_alloc(void);
void sys_condition_variable_release(struct sys_condition_variable *sys_cv);
void sys_condition_variable_wait(struct sys_condition_variable *sys_cv, struct sys_lock *lock);
void sys_condition_variable_broadcast(struct sys_condition_variable *sys_cv);
/* ========================== *
* Threads
* ========================== */
@ -492,15 +460,6 @@ void sys_wake_all(void *addr);
i32 sys_current_fiber_id(void);
/* ========================== *
* Counter
* ========================== */
struct sys_counter *sys_counter_alloc(void);
void sys_counter_release(struct sys_counter *counter);
void sys_counter_add(struct sys_counter *counter, i64 amount);
void sys_counter_wait(struct sys_counter *counter);
/* ========================== *
* Job
* ========================== */
@ -521,7 +480,8 @@ struct sys_job_data {
#define SYS_JOB_DEF(job_name, arg_name) void job_name(struct sys_job_data arg_name)
typedef SYS_JOB_DEF(sys_job_func, job_data);
void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_priority priority, struct sys_counter *counter);
struct snc_counter;
void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_priority priority, struct snc_counter *counter);
/* ========================== *
* Scratch context

View File

@ -71,11 +71,11 @@ struct win32_window {
HWND hwnd;
u32 tid;
struct sync_flag ready_sf;
struct snc_counter ready_fence;
u16 utf16_high_surrogate_last_input;
struct sys_mutex *settings_mutex;
struct snc_mutex settings_mutex;
struct sys_window_settings settings;
i32 monitor_width;
@ -93,7 +93,7 @@ struct win32_window {
struct atomic_i32 event_thread_shutdown;
struct sys_thread *event_thread;
struct sys_mutex *event_callbacks_mutex;
struct snc_mutex event_callbacks_mutex;
sys_window_event_callback_func *event_callbacks[SYS_WINDOW_EVENT_LISTENERS_MAX];
u64 event_callbacks_count;
@ -150,7 +150,7 @@ struct alignas(64) yielder {
/* =================================================== */
void *job_sig; /* 8 bytes */
/* =================================================== */
struct counter *job_counter; /* 8 bytes */
struct snc_counter *job_counter; /* 8 bytes */
/* =================================================== */
struct yielder *next; /* 8 bytes */
/* =================================================== */
@ -261,7 +261,7 @@ struct job_info {
i32 count;
sys_job_func *func;
void *sig;
struct counter *counter;
struct snc_counter *counter;
i32 fiber_id; /* If the job is being resumed from a yield */
@ -307,34 +307,24 @@ GLOBAL struct {
wchar_t panic_wstr[4096];
HANDLE panic_event;
/* Lookup tables */
/* Key lookup table */
enum sys_btn vk_btn_table[256];
/* Mutexes pool */
struct sys_mutex *mutexes_mutex;
struct arena *mutexes_arena;
struct win32_mutex *first_free_mutex;
/* Condition variables pool */
struct sys_mutex *condition_variables_mutex;
struct arena *condition_variables_arena;
struct win32_condition_variable *first_free_condition_variable;
/* Threads pool */
struct sys_mutex *threads_mutex;
struct snc_mutex threads_mutex;
struct arena *threads_arena;
struct win32_thread *threads_first;
struct win32_thread *threads_last;
struct win32_thread *threads_first_free;
/* Watches pool */
struct sys_mutex *watches_mutex;
struct snc_mutex watches_mutex;
struct arena *watches_arena;
struct win32_watch *watches_first_free;
/* Windows pool */
WNDCLASSEXW window_class;
struct sys_mutex *windows_mutex;
struct snc_mutex windows_mutex;
struct arena *windows_arena;
struct win32_window *first_free_window;
@ -359,11 +349,6 @@ GLOBAL struct {
struct fiber fibers[SYS_MAX_FIBERS];
struct fiber_ctx fiber_contexts[SYS_MAX_FIBERS];
/* Counters */
struct arena *counters_arena;
struct atomic_i32 counters_lock; /* TODO: Prevent false sharing */
struct counter *first_free_counter;
/* Jobs */
struct job_queue job_queues[NUM_JOB_QUEUE_KINDS];
@ -615,63 +600,6 @@ INTERNAL struct fiber_ctx *fiber_ctx_from_id(i32 id)
return &G.fiber_contexts[id];
}
/* ========================== *
* Counters
* ========================== */
INTERNAL struct counter *counter_alloc(void)
{
    /* Pop a counter off the global free list, or carve a fresh one from the
     * counters arena. The pool is guarded by a test-and-set spinlock. */
    struct counter *result = NULL;
    {
        while (atomic_i32_fetch_test_set(&G.counters_lock, 0, 1) != 0) ix_pause();
        {
            if (G.first_free_counter == NULL) {
                result = arena_push_no_zero(G.counters_arena, struct counter);
            } else {
                result = G.first_free_counter;
                G.first_free_counter = result->next_free;
            }
        }
        atomic_i32_fetch_set(&G.counters_lock, 0);
    }
    /* Zero outside the lock; recycled counters carry stale state. */
    MEMZERO_STRUCT(result);
    return result;
}
INTERNAL void counter_release(struct counter *counter)
{
    /* Push the counter back onto the global free list under the pool
     * spinlock; the memory is recycled by a later counter_alloc(). */
    while (atomic_i32_fetch_test_set(&G.counters_lock, 0, 1) != 0) ix_pause();
    counter->next_free = G.first_free_counter;
    G.first_free_counter = counter;
    atomic_i32_fetch_set(&G.counters_lock, 0);
}
INTERNAL void counter_add(struct counter *counter, i64 amount)
{
    /* Atomically adjust the count. Waiters sleep while the value is > 0,
     * so wake them only on the transition from positive to <= 0. */
    i64 prev = atomic_i64_fetch_add(&counter->v, amount);
    i64 next = prev + amount;
    b32 crossed_zero = (prev > 0) && (next <= 0);
    if (crossed_zero) {
        sys_wake_all(&counter->v);
    }
}
INTERNAL void counter_wait(struct counter *counter)
{
    /* Block the caller until the counter drops to zero or below.
     * Futex-style wait loop: pass the last observed value to sys_wait() so
     * the sleep only happens if the value still matches — this closes the
     * lost-wakeup race against a concurrent counter_add() that decrements
     * to <= 0 between our read and the wait. Re-read after every wakeup
     * (wakeups may be spurious or stale). */
    /* TODO: Spin with configurable count */
    i64 v = atomic_i64_fetch(&counter->v);
    while (v > 0) {
        sys_wait(&counter->v, &v, sizeof(v));
        v = atomic_i64_fetch(&counter->v);
    }
}
/* Public sys_counter API: thin shims over the internal counter pool.
 * struct sys_counter is treated as an opaque alias for struct counter. */
struct sys_counter *sys_counter_alloc(void)
{
    return (struct sys_counter *)counter_alloc();
}

void sys_counter_release(struct sys_counter *counter)
{
    counter_release((struct counter *)counter);
}

void sys_counter_add(struct sys_counter *counter, i64 amount)
{
    counter_add((struct counter *)counter, amount);
}

void sys_counter_wait(struct sys_counter *counter)
{
    counter_wait((struct counter *)counter);
}
/* ========================== *
* Test job
* ========================== */
@ -681,12 +609,11 @@ i32 sys_current_fiber_id(void)
return (i32)(i64)GetFiberData();
}
void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_priority priority, struct sys_counter *counter)
void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_priority priority, struct snc_counter *counter)
{
struct counter *job_counter = (struct counter *)counter;
if (count > 0) {
if (job_counter) {
counter_add(job_counter, count);
if (counter) {
snc_counter_add(counter, count);
}
struct fiber *fiber = fiber_from_id(sys_current_fiber_id());
priority = clamp_i32(priority, fiber->job_priority, SYS_PRIORITY_BACKGROUND); /* A job cannot create a job with a higher priority than itself */
@ -706,7 +633,7 @@ void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_priority priorit
info->count = count;
info->func = func;
info->sig = sig;
info->counter = job_counter;
info->counter = counter;
if (queue->last) {
queue->last->next = info;
} else {
@ -817,7 +744,7 @@ INTERNAL SYS_THREAD_DEF(worker_entry, worker_ctx_arg)
i32 job_id = 0;
sys_job_func *job_func = 0;
void *job_sig = 0;
struct counter *job_counter = 0;
struct snc_counter *job_counter = 0;
{
//__profnc("Pull job", RGB32_F(0.75, 0.75, 0));
for (u32 queue_index = 0; queue_index < countof(queues) && !job_func; ++queue_index) {
@ -993,7 +920,7 @@ INTERNAL SYS_THREAD_DEF(worker_entry, worker_ctx_arg)
case YIELD_KIND_DONE:
{
if (job_counter) {
counter_add(job_counter, -1);
snc_counter_add(job_counter, -1);
}
done = true;
} break;
@ -1634,7 +1561,7 @@ struct sys_watch *sys_watch_alloc(struct string dir_path)
struct win32_watch *w32_watch = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.watches_mutex);
struct snc_lock lock = snc_lock_e(&G.watches_mutex);
{
if (G.watches_first_free) {
w32_watch = G.watches_first_free;
@ -1643,7 +1570,7 @@ struct sys_watch *sys_watch_alloc(struct string dir_path)
w32_watch = arena_push_no_zero(G.watches_arena, struct win32_watch);
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(w32_watch);
@ -1670,12 +1597,12 @@ void sys_watch_release(struct sys_watch *dw)
CloseHandle(w32_watch->dir_handle);
CloseHandle(w32_watch->wake_handle);
struct sys_lock lock = sys_mutex_lock_e(G.watches_mutex);
struct snc_lock lock = snc_lock_e(&G.watches_mutex);
{
w32_watch->next_free = G.watches_first_free;
G.watches_first_free = w32_watch;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
struct sys_watch_info_list sys_watch_wait(struct arena *arena, struct sys_watch *dw)
@ -1823,11 +1750,11 @@ INTERNAL void win32_window_wake(struct win32_window *window);
INTERNAL void win32_window_process_event(struct win32_window *window, struct sys_event event)
{
__prof;
struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex);
struct snc_lock lock = snc_lock_e(&window->event_callbacks_mutex);
for (u64 i = 0; i < window->event_callbacks_count; ++i) {
window->event_callbacks[i](event);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
INTERNAL HWND win32_create_window(struct win32_window *window)
@ -1877,7 +1804,7 @@ INTERNAL SYS_THREAD_DEF(window_thread_entry_point, arg)
win32_update_window_from_system(window);
BringWindowToTop(window->hwnd);
sync_flag_set(&window->ready_sf);
snc_counter_add(&window->ready_fence, 1);
while (!atomic_i32_fetch(&window->event_thread_shutdown)) {
MSG msg = ZI;
@ -1957,36 +1884,29 @@ INTERNAL struct win32_window *win32_window_alloc(void)
{
struct win32_window *window = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(G.windows_mutex);
struct snc_lock lock = snc_lock_e(&G.windows_mutex);
if (G.first_free_window) {
window = G.first_free_window;
G.first_free_window = window->next_free;
} else {
window = arena_push_no_zero(G.windows_arena, struct win32_window);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
MEMZERO_STRUCT(window);
/* Allocate sync flag */
window->ready_sf = sync_flag_alloc();
/* Allocate mutexes */
window->settings_mutex = sys_mutex_alloc();
window->event_callbacks_mutex = sys_mutex_alloc();
/* Start window thread for processing events */
window->event_thread = sys_thread_alloc(&window_thread_entry_point, window, LIT("Window thread"), PROF_THREAD_GROUP_WINDOW);
/* Wait for event thread to create actual window */
sync_flag_wait(&window->ready_sf);
snc_counter_wait_gtz(&window->ready_fence);
return window;
}
INTERNAL void win32_window_release(struct win32_window *window)
{
struct sys_lock lock = sys_mutex_lock_e(G.windows_mutex);
struct snc_lock lock = snc_lock_e(&G.windows_mutex);
window->next_free = G.first_free_window;
G.first_free_window = window;
@ -1996,14 +1916,7 @@ INTERNAL void win32_window_release(struct win32_window *window)
win32_window_wake(window);
sys_thread_wait_release(window->event_thread);
/* Release mutexes */
sys_mutex_release(window->event_callbacks_mutex);
sys_mutex_release(window->settings_mutex);
/* Release sync flag */
sync_flag_release(&window->ready_sf);
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
INTERNAL void win32_update_window_from_system(struct win32_window *window)
@ -2382,7 +2295,7 @@ void sys_window_release(struct sys_window *sys_window)
void sys_window_register_event_callback(struct sys_window *sys_window, sys_window_event_callback_func *func)
{
struct win32_window *window = (struct win32_window *)sys_window;
struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex);
struct snc_lock lock = snc_lock_e(&window->event_callbacks_mutex);
{
if (window->event_callbacks_count + 1 > countof(window->event_callbacks)) {
sys_panic(LIT("Too many window event callbacks registered"));
@ -2390,14 +2303,14 @@ void sys_window_register_event_callback(struct sys_window *sys_window, sys_windo
window->event_callbacks[window->event_callbacks_count++] = func;
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
void sys_window_unregister_event_callback(struct sys_window *sys_window, sys_window_event_callback_func *func)
{
struct win32_window *window = (struct win32_window *)sys_window;
struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex);
struct snc_lock lock = snc_lock_e(&window->event_callbacks_mutex);
{
u64 count = window->event_callbacks_count;
sys_window_event_callback_func *last = count > 0 ? window->event_callbacks[count - 1] : NULL;
@ -2412,18 +2325,18 @@ void sys_window_unregister_event_callback(struct sys_window *sys_window, sys_win
}
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
void sys_window_update_settings(struct sys_window *sys_window, struct sys_window_settings *settings)
{
__prof;
struct win32_window *window = (struct win32_window *)sys_window;
struct sys_lock lock = sys_mutex_lock_e(window->settings_mutex);
struct snc_lock lock = snc_lock_e(&window->settings_mutex);
{
win32_update_window_from_settings(window, settings);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* FIXME: Lock settings mutex for these functions */
@ -2438,7 +2351,7 @@ void sys_window_show(struct sys_window *sys_window)
{
struct win32_window *window = (struct win32_window *)sys_window;
HWND hwnd = window->hwnd;
struct sys_lock lock = sys_mutex_lock_e(window->settings_mutex);
struct snc_lock lock = snc_lock_e(&window->settings_mutex);
{
i32 show_cmd = SW_NORMAL;
struct sys_window_settings *settings = &window->settings;
@ -2451,7 +2364,7 @@ void sys_window_show(struct sys_window *sys_window)
ShowWindow(hwnd, show_cmd);
BringWindowToTop(hwnd);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
struct v2 sys_window_get_size(struct sys_window *sys_window)
@ -2516,7 +2429,7 @@ void sys_window_cursor_disable_clip(struct sys_window *sys_window)
INTERNAL struct win32_thread *win32_thread_alloc(void)
{
struct win32_thread *t = NULL;
struct sys_lock lock = sys_mutex_lock_e(G.threads_mutex);
struct snc_lock lock = snc_lock_e(&G.threads_mutex);
{
if (G.threads_first_free) {
t = G.threads_first_free;
@ -2533,13 +2446,13 @@ INTERNAL struct win32_thread *win32_thread_alloc(void)
t->prev = G.threads_last;
G.threads_last = t;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
return t;
}
INTERNAL void win32_thread_release(struct win32_thread *t)
{
struct sys_lock lock = sys_mutex_lock_e(G.threads_mutex);
struct snc_lock lock = snc_lock_e(&G.threads_mutex);
{
if (t->prev) {
t->prev->next = t->next;
@ -2555,7 +2468,7 @@ INTERNAL void win32_thread_release(struct win32_thread *t)
}
t->next = G.threads_first_free;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
INTERNAL DWORD WINAPI win32_thread_proc(LPVOID vt)
@ -3014,9 +2927,6 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
G.num_fibers = 1; /* Fiber at index 0 always nil */
G.fiber_names_arena = arena_alloc(GIGABYTE(64));
/* Init counters */
G.counters_arena = arena_alloc(GIGABYTE(64));
/* Init job queues */
for (u32 i = 0; i < countof(G.job_queues); ++i) {
struct job_queue *queue = &G.job_queues[i];
@ -3062,25 +2972,13 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Set up timing period */
timeBeginPeriod(G.scheduler_period_ms);
/* Setup mutexes */
G.mutexes_arena = arena_alloc(GIGABYTE(64));
struct win32_mutex *first_mutex = arena_push(G.mutexes_arena, struct win32_mutex);
G.mutexes_mutex = (struct sys_mutex *)first_mutex;
/* Set up condition variables */
G.condition_variables_mutex = sys_mutex_alloc();
G.condition_variables_arena = arena_alloc(GIGABYTE(64));
/* Set up threads */
G.threads_mutex = sys_mutex_alloc();
G.threads_arena = arena_alloc(GIGABYTE(64));
/* Set up watches */
G.watches_mutex = sys_mutex_alloc();
G.watches_arena = arena_alloc(GIGABYTE(64));
/* Set up windows */
G.windows_mutex = sys_mutex_alloc();
G.windows_arena = arena_alloc(GIGABYTE(64));
/* Initialize vk table */
@ -3139,12 +3037,12 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Get app thread handle */
HANDLE app_thread_handle = 0;
struct sys_lock lock = sys_mutex_lock_s(G.threads_mutex);
struct snc_lock lock = snc_lock_s(&G.threads_mutex);
{
struct win32_thread *wt = (struct win32_thread *)app_thread;
app_thread_handle = wt->handle;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
/* Wait for either app thread exit or panic */
@ -3168,7 +3066,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Find any dangling threads that haven't exited gracefully by now */
if (!atomic_i32_fetch(&G.panicking)) {
struct sys_lock lock = sys_mutex_lock_s(G.threads_mutex);
struct snc_lock lock = snc_lock_s(&G.threads_mutex);
if (G.threads_first) {
struct arena_temp scratch = scratch_begin_no_conflict();
u64 num_dangling_threads = 0;
@ -3183,7 +3081,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
sys_panic(threads_msg);
scratch_end(scratch);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* Check if panicking */

View File

@ -83,7 +83,7 @@ GLOBAL struct {
b32 debug_draw;
/* Debug console */
struct sys_mutex *console_logs_mutex;
struct snc_mutex console_logs_mutex;
struct arena *console_logs_arena;
struct console_log *first_console_log;
struct console_log *last_console_log;
@ -93,11 +93,11 @@ GLOBAL struct {
b32 profiler_launched;
/* Window -> user */
struct sys_mutex *sys_events_mutex;
struct snc_mutex sys_events_mutex;
struct arena *sys_events_arena;
/* User -> local sim */
struct sys_mutex *user_sim_cmd_mutex;
struct snc_mutex user_sim_cmd_mutex;
struct sim_control user_sim_cmd_control;
struct sim_ent_id user_hovered_ent;
u64 last_user_sim_cmd_gen;
@ -107,7 +107,7 @@ GLOBAL struct {
struct atomic_i32 user_paused_steps;
/* Local sim -> user */
struct sys_mutex *local_to_user_client_mutex;
struct snc_mutex local_to_user_client_mutex;
struct sim_client_store *local_to_user_client_store;
struct sim_client *local_to_user_client;
i64 local_to_user_client_publish_dt_ns;
@ -228,7 +228,6 @@ struct user_startup_receipt user_startup(struct gp_startup_receipt *gp_sr,
G.average_local_to_user_snapshot_publish_dt_ns = NS_FROM_SECONDS(1) / SIM_TICKS_PER_SECOND;
/* Sys events */
G.sys_events_mutex = sys_mutex_alloc();
G.sys_events_arena = arena_alloc(GIGABYTE(64));
/* User blend clients */
@ -238,19 +237,14 @@ struct user_startup_receipt user_startup(struct gp_startup_receipt *gp_sr,
G.ss_blended = sim_snapshot_nil();
/* Local to user client */
G.local_to_user_client_mutex = sys_mutex_alloc();
G.local_to_user_client_store = sim_client_store_alloc();
G.local_to_user_client = sim_client_alloc(G.local_to_user_client_store);
/* User sim control */
G.user_sim_cmd_mutex = sys_mutex_alloc();
/* GPU handles */
G.world_to_user_xf = XFORM_IDENT;
G.world_gp_flow = gp_flow_alloc();
G.ui_gp_flow = gp_flow_alloc();
G.console_logs_mutex = sys_mutex_alloc();
G.console_logs_arena = arena_alloc(GIGABYTE(64));
//log_register_callback(debug_console_log_callback, LOG_LEVEL_SUCCESS);
log_register_callback(debug_console_log_callback, LOG_LEVEL_DEBUG);
@ -281,7 +275,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(user_shutdown)
INTERNAL struct sys_event_array pop_sys_events(struct arena *arena)
{
struct sys_event_array array = ZI;
struct sys_lock lock = sys_mutex_lock_e(G.sys_events_mutex);
struct snc_lock lock = snc_lock_e(&G.sys_events_mutex);
{
struct sys_event *src_events = (struct sys_event *)arena_base(G.sys_events_arena);
array.count = G.sys_events_arena->pos / sizeof(*src_events);
@ -289,17 +283,17 @@ INTERNAL struct sys_event_array pop_sys_events(struct arena *arena)
MEMCPY(array.events, src_events, array.count * sizeof(*src_events));
arena_reset(G.sys_events_arena);
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
return array;
}
INTERNAL SYS_WINDOW_EVENT_CALLBACK_FUNC_DEF(window_event_callback, event)
{
struct sys_lock lock = sys_mutex_lock_e(G.sys_events_mutex);
struct snc_lock lock = snc_lock_e(&G.sys_events_mutex);
{
*arena_push_no_zero(G.sys_events_arena, struct sys_event) = event;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -442,7 +436,7 @@ INTERNAL struct string get_ent_debug_text(struct arena *arena, struct sim_ent *e
INTERNAL LOG_EVENT_CALLBACK_FUNC_DEF(debug_console_log_callback, log)
{
__prof;
struct sys_lock lock = sys_mutex_lock_e(G.console_logs_mutex);
struct snc_lock lock = snc_lock_e(&G.console_logs_mutex);
{
struct console_log *clog = arena_push(G.console_logs_arena, struct console_log);
clog->level = log.level;
@ -462,7 +456,7 @@ INTERNAL LOG_EVENT_CALLBACK_FUNC_DEF(debug_console_log_callback, log)
}
G.last_console_log = clog;
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
INTERNAL void draw_debug_console(i32 level, b32 minimized)
@ -503,7 +497,7 @@ INTERNAL void draw_debug_console(i32 level, b32 minimized)
i64 now_ns = sys_time_ns();
struct font *font = font_load_async(LIT("font/fixedsys.ttf"), 12.0f);
if (font) {
struct sys_lock lock = sys_mutex_lock_e(G.console_logs_mutex);
struct snc_lock lock = snc_lock_e(&G.console_logs_mutex);
{
for (struct console_log *log = G.last_console_log; log; log = log->prev) {
f32 opacity = 0.75;
@ -550,7 +544,7 @@ INTERNAL void draw_debug_console(i32 level, b32 minimized)
}
}
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
if (bounds_top < F32_INFINITY && bounds_bottom > -F32_INFINITY) {
G.console_logs_height = bounds_bottom - bounds_top;
@ -622,7 +616,7 @@ INTERNAL void user_update(void)
* ========================== */
{
struct sys_lock lock = sys_mutex_lock_e(G.local_to_user_client_mutex);
struct snc_lock lock = snc_lock_e(&G.local_to_user_client_mutex);
u64 old_last_tick = G.user_unblended_client->last_tick;
u64 last_tick = G.local_to_user_client->last_tick;
if (last_tick > old_last_tick) {
@ -639,7 +633,7 @@ INTERNAL void user_update(void)
G.average_local_to_user_snapshot_publish_dt_ns += G.local_to_user_client_publish_dt_ns / 1;
#endif
}
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
/* ========================== *
@ -1850,7 +1844,7 @@ INTERNAL void user_update(void)
/* Set user sim control */
{
struct sys_lock lock = sys_mutex_lock_e(G.user_sim_cmd_mutex);
struct snc_lock lock = snc_lock_e(&G.user_sim_cmd_mutex);
/* Reset flags */
if (G.user_sim_cmd_gen != G.last_user_sim_cmd_gen) {
@ -1862,7 +1856,7 @@ INTERNAL void user_update(void)
G.user_sim_cmd_control = control;
G.user_sim_cmd_control.flags |= old_flags;
G.user_hovered_ent = hovered_ent->id;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
}
@ -2141,7 +2135,7 @@ INTERNAL void generate_user_input_cmds(struct sim_client *user_input_client, u64
sim_ent_activate(control_cmd, user_input_ss->tick);
}
{
struct sys_lock lock = sys_mutex_lock_e(G.user_sim_cmd_mutex);
struct snc_lock lock = snc_lock_e(&G.user_sim_cmd_mutex);
/* Update control cmd */
{
control_cmd->cmd_control = G.user_sim_cmd_control;
@ -2156,7 +2150,7 @@ INTERNAL void generate_user_input_cmds(struct sim_client *user_input_client, u64
}
#endif
++G.user_sim_cmd_gen;
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
}
@ -2759,14 +2753,14 @@ INTERNAL SYS_JOB_DEF(local_sim_job, _)
struct sim_snapshot *local_ss = sim_snapshot_from_tick(local_client, local_client->last_tick);
if (local_ss->valid) {
/* TODO: Double buffer */
struct sys_lock lock = sys_mutex_lock_e(G.local_to_user_client_mutex);
struct snc_lock lock = snc_lock_e(&G.local_to_user_client_mutex);
sim_snapshot_alloc(G.local_to_user_client, local_ss, local_ss->tick);
i64 publish_ns = sys_time_ns();
G.local_to_user_client_publish_dt_ns = publish_ns - last_publish_to_user_ns;
G.local_to_user_client_publish_time_ns = publish_ns;
last_publish_to_user_ns = publish_ns;
sim_snapshot_release_ticks_in_range(G.local_to_user_client, 0, local_ss->tick - 1);
sys_mutex_unlock(&lock);
snc_unlock(&lock);
}
}

View File

@ -7,6 +7,7 @@
#include "arena.h"
#include "atomic.h"
#include "math.h"
#include "snc.h"
/* Utility functions and stuff that don't have a home :( */
@ -256,49 +257,6 @@ INLINE void dict_remove_entry(struct dict *dict, struct dict_entry *entry)
}
}
/* ========================== *
* Sync flag
* ========================== */
/* One-shot signal: waiters block until the flag is set once. */
struct sync_flag {
    struct sys_mutex *mutex;           /* Guards `flag` */
    struct sys_condition_variable *cv; /* Broadcast when `flag` becomes 1 */
    b32 flag;                          /* 0 until sync_flag_set(); never cleared here */
};
INLINE struct sync_flag sync_flag_alloc(void)
{
struct sync_flag sf = ZI;
sf.mutex = sys_mutex_alloc();
sf.cv = sys_condition_variable_alloc();
return sf;
}
INLINE void sync_flag_release(struct sync_flag *sf)
{
    /* Tear down the primitives backing this flag; the b32 itself
     * needs no cleanup. */
    struct sys_mutex *mutex = sf->mutex;
    struct sys_condition_variable *cv = sf->cv;
    sys_mutex_release(mutex);
    sys_condition_variable_release(cv);
}
INLINE void sync_flag_set(struct sync_flag *sf)
{
    __prof;
    /* Publish flag = 1 under the mutex, then wake every waiter. Holding
     * the mutex across the broadcast keeps the set visible to any waiter
     * re-checking the predicate in sync_flag_wait(). */
    struct sys_lock lock = sys_mutex_lock_e(sf->mutex);
    {
        sf->flag = 1;
        sys_condition_variable_broadcast(sf->cv);
    }
    sys_mutex_unlock(&lock);
}
INLINE void sync_flag_wait(struct sync_flag *sf)
{
    /* Block until another thread calls sync_flag_set().
     * Standard condition-variable pattern: re-check the predicate after
     * every wakeup, so spurious wakeups are tolerated.
     * NOTE(review): this takes the lock with sys_mutex_lock_s() while
     * sync_flag_set() uses sys_mutex_lock_e() — presumably different
     * lock-acquisition flavors (e.g. sleeping vs eager/spinning); confirm
     * the intended distinction against the sys layer. */
    __prof;
    struct sys_lock lock = sys_mutex_lock_s(sf->mutex);
    while (sf->flag != 1) {
        sys_condition_variable_wait(sf->cv, &lock);
    }
    sys_mutex_unlock(&lock);
}
/* ========================== *
* Sleep frame
* ========================== */