more fleshed out atomics for i32, i64, u32, u64

This commit is contained in:
jacob 2024-03-15 22:37:49 -05:00
parent edadca820a
commit 3b0cdfd1f4
19 changed files with 330 additions and 182 deletions

View File

@ -144,6 +144,7 @@ target_precompile_headers(powerplay_exe PRIVATE src/common.h)
# Common flags
set(COMPILER_FLAGS "
-fno-strict-aliasing \
-fno-finite-loops \
-msse4.2 \
")

BIN
res/graphics/crosshair.ase (Stored with Git LFS)

Binary file not shown.

View File

@ -56,12 +56,14 @@ INTERNAL void refresh_dbg_table(void)
{
#if RTC
sys_mutex_lock(&L.dbg_table_mutex);
MEMZERO_ARRAY(L.dbg_table);
L.dbg_table_count = 0;
for (u64 i = 0; i < ARRAY_COUNT(L.lookup); ++i) {
struct asset *asset = &L.lookup[i];
if (asset->hash != 0) {
L.dbg_table[L.dbg_table_count++] = asset;
{
MEMZERO_ARRAY(L.dbg_table);
L.dbg_table_count = 0;
for (u64 i = 0; i < ARRAY_COUNT(L.lookup); ++i) {
struct asset *asset = &L.lookup[i];
if (asset->hash != 0) {
L.dbg_table[L.dbg_table_count++] = asset;
}
}
}
sys_mutex_unlock(&L.dbg_table_mutex);

View File

@ -3,13 +3,113 @@
#if OS_WINDOWS
/* winnt.h declarations */
/* atomic_i32 */
/* Prototypes for the 64-bit interlocked intrinsics (normally pulled in via
 * winnt.h). Declared manually here; used by the legacy atomic_*_eval64
 * macros at the end of this section and by the atomic_i64/u64 helpers.
 * MSVC recognizes these as compiler intrinsics. */
i64 _InterlockedIncrement64(i64 volatile *addend);
i64 _InterlockedDecrement64(i64 volatile *addend);

/* Sequentially consistent read, implemented as an interlocked add of zero. */
FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x)
{
	return _InterlockedExchangeAdd((volatile long *)&x->_v, 0);
}

/* Increment and return the NEW (post-increment) value. */
FORCE_INLINE i32 atomic_i32_inc_eval(struct atomic_i32 *x)
{
	return _InterlockedIncrement((volatile long *)&x->_v);
}

/* Decrement and return the NEW (post-decrement) value. */
FORCE_INLINE i32 atomic_i32_dec_eval(struct atomic_i32 *x)
{
	return _InterlockedDecrement((volatile long *)&x->_v);
}

/* Add `a` and return the PREVIOUS value. */
FORCE_INLINE i32 atomic_i32_eval_add(struct atomic_i32 *x, i32 a)
{
	return _InterlockedExchangeAdd((volatile long *)&x->_v, a);
}

/* Store `e` and return the PREVIOUS value. */
FORCE_INLINE i32 atomic_i32_eval_exchange(struct atomic_i32 *x, i32 e)
{
	return _InterlockedExchange((volatile long *)&x->_v, e);
}

/* If the current value equals `c`, store `e`. Returns the PREVIOUS value
 * either way; compare the result against `c` to tell whether the exchange
 * happened. Note the intrinsic's argument order is (dest, exchange, comparand). */
FORCE_INLINE i32 atomic_i32_eval_compare_exchange(struct atomic_i32 *x, i32 c, i32 e)
{
	return _InterlockedCompareExchange((volatile long *)&x->_v, e, c);
}

/* Legacy raw-pointer 64-bit helpers; prefer atomic_i64_inc_eval /
 * atomic_i64_dec_eval on struct atomic_i64. Kept because other translation
 * units may still reference them. */
#define atomic_inc_eval64(ptr) _InterlockedIncrement64(ptr)
#define atomic_dec_eval64(ptr) _InterlockedDecrement64(ptr)
/* atomic_u32 */
/* Sequentially consistent read of the unsigned 32-bit cell. The Win32
 * interlocked intrinsics operate on `long`, so the cell is reinterpreted. */
FORCE_INLINE u32 atomic_u32_eval(struct atomic_u32 *x)
{
	volatile long *cell = (volatile long *)&x->_v;
	return (u32)_InterlockedExchangeAdd(cell, 0);
}

/* Increment; yields the NEW (post-increment) value. */
FORCE_INLINE u32 atomic_u32_inc_eval(struct atomic_u32 *x)
{
	volatile long *cell = (volatile long *)&x->_v;
	return (u32)_InterlockedIncrement(cell);
}

/* Decrement; yields the NEW (post-decrement) value. */
FORCE_INLINE u32 atomic_u32_dec_eval(struct atomic_u32 *x)
{
	volatile long *cell = (volatile long *)&x->_v;
	return (u32)_InterlockedDecrement(cell);
}

/* Add `a`; yields the PREVIOUS value. */
FORCE_INLINE u32 atomic_u32_eval_add(struct atomic_u32 *x, u32 a)
{
	volatile long *cell = (volatile long *)&x->_v;
	return (u32)_InterlockedExchangeAdd(cell, a);
}

/* Store `e`; yields the PREVIOUS value. */
FORCE_INLINE u32 atomic_u32_eval_exchange(struct atomic_u32 *x, u32 e)
{
	volatile long *cell = (volatile long *)&x->_v;
	return (u32)_InterlockedExchange(cell, e);
}

/* Compare-and-swap: store `e` iff the cell holds `c`. Yields the PREVIOUS
 * value; the swap took place iff the result equals `c`. Intrinsic argument
 * order is (dest, exchange, comparand). */
FORCE_INLINE u32 atomic_u32_eval_compare_exchange(struct atomic_u32 *x, u32 c, u32 e)
{
	volatile long *cell = (volatile long *)&x->_v;
	return (u32)_InterlockedCompareExchange(cell, e, c);
}
/* atomic_i64 */
/* Sequentially consistent read of the signed 64-bit cell (add-of-zero). */
FORCE_INLINE i64 atomic_i64_eval(struct atomic_i64 *x)
{
	i64 volatile *cell = &x->_v;
	return _InterlockedExchangeAdd64(cell, 0);
}

/* Increment; yields the NEW (post-increment) value. */
FORCE_INLINE i64 atomic_i64_inc_eval(struct atomic_i64 *x)
{
	i64 volatile *cell = &x->_v;
	return _InterlockedIncrement64(cell);
}

/* Decrement; yields the NEW (post-decrement) value. */
FORCE_INLINE i64 atomic_i64_dec_eval(struct atomic_i64 *x)
{
	i64 volatile *cell = &x->_v;
	return _InterlockedDecrement64(cell);
}

/* Add `a`; yields the PREVIOUS value. */
FORCE_INLINE i64 atomic_i64_eval_add(struct atomic_i64 *x, i64 a)
{
	i64 volatile *cell = &x->_v;
	return _InterlockedExchangeAdd64(cell, a);
}

/* Store `e`; yields the PREVIOUS value. */
FORCE_INLINE i64 atomic_i64_eval_exchange(struct atomic_i64 *x, i64 e)
{
	i64 volatile *cell = &x->_v;
	return _InterlockedExchange64(cell, e);
}

/* Compare-and-swap: store `e` iff the cell holds `c`. Yields the PREVIOUS
 * value; the swap took place iff the result equals `c`. Intrinsic argument
 * order is (dest, exchange, comparand). */
FORCE_INLINE i64 atomic_i64_eval_compare_exchange(struct atomic_i64 *x, i64 c, i64 e)
{
	i64 volatile *cell = &x->_v;
	return _InterlockedCompareExchange64(cell, e, c);
}
/* atomic_u64 */
/* Sequentially consistent read of the unsigned 64-bit cell. The intrinsics
 * take signed i64 pointers, so the cell is reinterpreted. */
FORCE_INLINE u64 atomic_u64_eval(struct atomic_u64 *x)
{
	volatile i64 *cell = (volatile i64 *)&x->_v;
	return (u64)_InterlockedExchangeAdd64(cell, 0);
}

/* Increment; yields the NEW (post-increment) value. */
FORCE_INLINE u64 atomic_u64_inc_eval(struct atomic_u64 *x)
{
	volatile i64 *cell = (volatile i64 *)&x->_v;
	return (u64)_InterlockedIncrement64(cell);
}

/* Decrement; yields the NEW (post-decrement) value. */
FORCE_INLINE u64 atomic_u64_dec_eval(struct atomic_u64 *x)
{
	volatile i64 *cell = (volatile i64 *)&x->_v;
	return (u64)_InterlockedDecrement64(cell);
}

/* Add `a`; yields the PREVIOUS value. */
FORCE_INLINE u64 atomic_u64_eval_add(struct atomic_u64 *x, u64 a)
{
	volatile i64 *cell = (volatile i64 *)&x->_v;
	return (u64)_InterlockedExchangeAdd64(cell, a);
}

/* Store `e`; yields the PREVIOUS value. */
FORCE_INLINE u64 atomic_u64_eval_exchange(struct atomic_u64 *x, u64 e)
{
	volatile i64 *cell = (volatile i64 *)&x->_v;
	return (u64)_InterlockedExchange64(cell, e);
}

/* Compare-and-swap: store `e` iff the cell holds `c`. Yields the PREVIOUS
 * value; the swap took place iff the result equals `c`. Intrinsic argument
 * order is (dest, exchange, comparand). */
FORCE_INLINE u64 atomic_u64_eval_compare_exchange(struct atomic_u64 *x, u64 c, u64 e)
{
	volatile i64 *cell = (volatile i64 *)&x->_v;
	return (u64)_InterlockedCompareExchange64(cell, e, c);
}
#else
# error "Atomics not implemented"

View File

@ -28,6 +28,7 @@ extern "C" {
#include <stddef.h>
#include <stdint.h>
#include <stdarg.h>
#include <intrin.h>
#include <nmmintrin.h> /* SSE4.2 */
/* ========================== *
@ -153,9 +154,11 @@ extern "C" {
# define INLINE static inline
#else
/* TODO: benchmark benefits of forced inlining */
# define INLINE __attribute((always_inline)) static inline
# define INLINE static inline __attribute((always_inline))
#endif
#define FORCE_INLINE static inline __attribute((always_inline))
/* Separate `static` usage into different keywords for easier grepping */
#define LOCAL_PERSIST static
#define INTERNAL static
@ -285,6 +288,26 @@ typedef u64 umm;
#define PI ((f32)3.14159265358979323846)
#define TAU ((f32)6.28318530717958647693)
/* ========================== *
* Atomics
* ========================== */
/* Opaque atomic cells. `_v` is private — access only through the matching
 * atomic_<type>_* helper functions, which implement the operations with
 * interlocked intrinsics on Windows. `volatile` alone does NOT provide
 * atomicity; it only prevents the compiler from caching the value. */
struct atomic_i32 {
volatile i32 _v; /* backing value; use atomic_i32_* helpers */
};
struct atomic_u32 {
volatile u32 _v; /* backing value; use atomic_u32_* helpers */
};
struct atomic_i64 {
volatile i64 _v; /* backing value; use atomic_i64_* helpers */
};
struct atomic_u64 {
volatile u64 _v; /* backing value; use atomic_u64_* helpers */
};
/* ========================== *
* Common structs
* ========================== */

View File

@ -51,24 +51,26 @@ void font_startup(void)
INTERNAL struct font_task_params *font_task_params_alloc(void)
{
struct font_task_params *p = NULL;
sys_mutex_lock(&L.params.mutex);
{
sys_mutex_lock(&L.params.mutex);
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct font_task_params);
}
sys_mutex_unlock(&L.params.mutex);
}
sys_mutex_unlock(&L.params.mutex);
return p;
}
INTERNAL void font_task_params_release(struct font_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
p->next_free = L.params.head_free;
L.params.head_free = p;
{
p->next_free = L.params.head_free;
L.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
}

View File

@ -64,7 +64,9 @@ INTERNAL void publish_game_tick(void)
{
__prof;
sys_mutex_lock(&L.published_tick_mutex);
world_copy_replace(&L.published_tick, &L.world);
{
world_copy_replace(&L.published_tick, &L.world);
}
sys_mutex_unlock(&L.published_tick_mutex);
}
@ -567,7 +569,9 @@ void game_shutdown(void)
void game_get_latest_tick(struct world *dest)
{
sys_mutex_lock(&L.published_tick_mutex);
world_copy_replace(dest, &L.published_tick);
{
world_copy_replace(dest, &L.published_tick);
}
sys_mutex_unlock(&L.published_tick_mutex);
}

View File

@ -74,7 +74,10 @@ void log_startup(struct string logfile_path)
void log_register_callback(log_event_callback_func *func)
{
sys_mutex_lock(&L.mutex);
(UNUSED)func;
{
/* TODO */
(UNUSED)func;
}
sys_mutex_unlock(&L.mutex);
}

View File

@ -569,20 +569,20 @@ INLINE struct mat4x4 mat4x4_from_xform(struct xform xf)
};
}
INLINE struct mat4x4 mat4x4_from_ortho(f32 left, f32 right, f32 bottom, f32 top, f32 near, f32 far)
INLINE struct mat4x4 mat4x4_from_ortho(f32 left, f32 right, f32 bottom, f32 top, f32 near_z, f32 far_z)
{
struct mat4x4 m = {0};
f32 rl = 1.0f / (right - left);
f32 tb = 1.0f / (top - bottom);
f32 fn = -1.0f / (far - near);
f32 fn = -1.0f / (far_z - near_z);
m.e[0][0] = 2.0f * rl;
m.e[1][1] = 2.0f * tb;
m.e[2][2] = 2.0f * fn;
m.e[3][0] = -(right + left) * rl;
m.e[3][1] = -(top + bottom) * tb;
m.e[3][2] = (far + near) * fn;
m.e[3][2] = (far_z + near_z) * fn;
m.e[3][3] = 1.0f;
return m;

View File

@ -5,7 +5,6 @@
#include "sys.h"
#include "playback.h"
#include "math.h"
#include "atomic.h"
/* TODO: Cap max sounds playing. */
@ -188,8 +187,10 @@ struct mixer_track_handle mixer_play_ex(struct sound *sound, struct mixer_desc d
struct track *track;
{
sys_mutex_lock(&L.mutex);
track = track_alloc_assume_locked(sound);
track->desc = desc;
{
track = track_alloc_assume_locked(sound);
track->desc = desc;
}
sys_mutex_unlock(&L.mutex);
}
return track_to_handle(track);
@ -204,13 +205,13 @@ struct mixer_desc mixer_track_get(struct mixer_track_handle handle)
if (track) {
/* TODO: Only lock mutex on track itself or something */
sys_mutex_lock(&L.mutex);
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
if (track) {
res = track->desc;
{
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
if (track) {
res = track->desc;
}
}
sys_mutex_unlock(&L.mutex);
}
@ -224,13 +225,13 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc)
if (track) {
/* TODO: Only lock mutex on track itself or something */
sys_mutex_lock(&L.mutex);
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
if (track) {
track->desc = desc;
{
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
if (track) {
track->desc = desc;
}
}
sys_mutex_unlock(&L.mutex);
}
}

View File

@ -195,22 +195,24 @@ INTERNAL struct renderer_handle handle_alloc(void *data)
struct handle_slot *slot = NULL;
sys_mutex_lock(&store->mutex);
if (store->head_free) {
/* Take first from free list */
slot = store->head_free;
store->head_free = slot->next_free;
slot->next_free = NULL;
} else {
/* Or push onto arena */
if (store->count + 1 >= HANDLE_IDX_MAX) {
sys_panic(STR("Maximum renderer handles exceeded"));
{
if (store->head_free) {
/* Take first from free list */
slot = store->head_free;
store->head_free = slot->next_free;
slot->next_free = NULL;
} else {
/* Or push onto arena */
if (store->count + 1 >= HANDLE_IDX_MAX) {
sys_panic(STR("Maximum renderer handles exceeded"));
}
slot = arena_push_zero(&store->arena, struct handle_slot);
slot->idx = store->count;
slot->gen = 1;
++store->count;
}
slot = arena_push_zero(&store->arena, struct handle_slot);
slot->idx = store->count;
slot->gen = 1;
++store->count;
slot->data = data;
}
slot->data = data;
sys_mutex_unlock(&store->mutex);
struct renderer_handle handle = HANDLE_CREATE(slot->idx, slot->gen);
@ -226,27 +228,29 @@ INTERNAL void handle_release(struct renderer_handle handle)
u32 gen = HANDLE_GEN(handle);
sys_mutex_lock(&store->mutex);
if (idx < store->count) {
struct handle_slot *slot = &store->array[idx];
if (slot->gen == gen) {
/* Insert into free list */
if (gen + 1 < HANDLE_GEN_MAX) {
slot->next_free = store->head_free;
store->head_free = slot;
{
if (idx < store->count) {
struct handle_slot *slot = &store->array[idx];
if (slot->gen == gen) {
/* Insert into free list */
if (gen + 1 < HANDLE_GEN_MAX) {
slot->next_free = store->head_free;
store->head_free = slot;
} else {
/* Maximum generations exceeded. Not a runtime error since it
* shouldn't cause issues in practice (just can't recycle this handle).
* Still probably means there's a problem in the code. */
ASSERT(false);
}
++slot->gen;
} else {
/* Maximum generations exceeded. Not a runtime error since it
* shouldn't cause issues in practice (just can't recycle this handle).
* Still probably means there's a problem in the code. */
/* Tried to release handle not in store (non-matching generation) */
ASSERT(false);
}
++slot->gen;
} else {
/* Tried to release handle not in store (non-matching generation) */
/* Tried to release out-of-bounds handle */
ASSERT(false);
}
} else {
/* Tried to release out-of-bounds handle */
ASSERT(false);
}
sys_mutex_unlock(&store->mutex);
}

View File

@ -48,24 +48,26 @@ void sheet_startup(void)
INTERNAL struct sheet_task_params *sheet_task_params_alloc(void)
{
struct sheet_task_params *p = NULL;
sys_mutex_lock(&L.params.mutex);
{
sys_mutex_lock(&L.params.mutex);
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct sheet_task_params);
}
sys_mutex_unlock(&L.params.mutex);
}
sys_mutex_unlock(&L.params.mutex);
return p;
}
INTERNAL void sheet_task_params_release(struct sheet_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
p->next_free = L.params.head_free;
L.params.head_free = p;
{
p->next_free = L.params.head_free;
L.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
}

View File

@ -49,11 +49,13 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void)
struct sound_task_params *p = NULL;
{
sys_mutex_lock(&L.params.mutex);
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct sound_task_params);
{
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct sound_task_params);
}
}
sys_mutex_unlock(&L.params.mutex);
}
@ -63,8 +65,10 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void)
INTERNAL void sound_task_params_release(struct sound_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
p->next_free = L.params.head_free;
L.params.head_free = p;
{
p->next_free = L.params.head_free;
L.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
}

View File

@ -330,7 +330,7 @@ void sys_mutex_assert_locked(struct sys_mutex *mutex);
struct sys_rw_mutex {
u64 handle;
#if RTC
i64 num_shared;
struct atomic_i64 num_shared;
u64 owner_tid;
# if _WIN32
wchar_t *owner_name;
@ -360,7 +360,7 @@ void sys_rw_mutex_assert_locked_exclusive(struct sys_rw_mutex *mutex);
struct sys_condition_variable {
u64 handle;
#if RTC
i64 num_sleepers;
struct atomic_i64 num_sleepers;
#endif
};

View File

@ -1205,7 +1205,7 @@ void sys_rw_mutex_release(struct sys_rw_mutex *mutex)
(UNUSED)mutex;
/* Mutex must be unlocked */
ASSERT(mutex->owner_tid == 0);
ASSERT(mutex->num_shared == 0);
ASSERT(atomic_i64_eval(&mutex->num_shared) == 0);
}
void sys_rw_mutex_lock_exclusive(struct sys_rw_mutex *mutex)
@ -1233,7 +1233,7 @@ void sys_rw_mutex_lock_shared(struct sys_rw_mutex *mutex)
__prof;
AcquireSRWLockShared((SRWLOCK *)&mutex->handle);
#if RTC
atomic_inc_eval64(&mutex->num_shared);
atomic_i64_inc_eval(&mutex->num_shared);
#endif
}
@ -1241,7 +1241,7 @@ void sys_rw_mutex_unlock_shared(struct sys_rw_mutex *mutex)
{
__prof;
#if RTC
atomic_dec_eval64(&mutex->num_shared);
atomic_i64_dec_eval(&mutex->num_shared);
#endif
ReleaseSRWLockShared((SRWLOCK *)&mutex->handle);
}
@ -1263,11 +1263,13 @@ INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void)
struct win32_condition_variable *cv = NULL;
{
sys_mutex_lock(&L.condition_variables_mutex);
if (L.first_free_condition_variable) {
cv = L.first_free_condition_variable;
L.first_free_condition_variable = cv->next_free;
} else {
cv = arena_push_zero(&L.condition_variables_arena, struct win32_condition_variable);
{
if (L.first_free_condition_variable) {
cv = L.first_free_condition_variable;
L.first_free_condition_variable = cv->next_free;
} else {
cv = arena_push_zero(&L.condition_variables_arena, struct win32_condition_variable);
}
}
sys_mutex_unlock(&L.condition_variables_mutex);
}
@ -1303,7 +1305,7 @@ void sys_condition_variable_release(struct sys_condition_variable *cv)
{
__prof;
/* Condition variable must not have any sleepers (signal before releasing) */
ASSERT(cv->num_sleepers == 0);
ASSERT(atomic_i64_eval(&cv->num_sleepers) == 0);
win32_condition_variable_release((struct win32_condition_variable *)cv->handle);
}
@ -1311,11 +1313,11 @@ void sys_condition_variable_wait(struct sys_condition_variable *cv, struct sys_m
{
__prof;
#if RTC
atomic_inc_eval64(&cv->num_sleepers);
atomic_i64_inc_eval(&cv->num_sleepers);
#endif
SleepConditionVariableSRW((PCONDITION_VARIABLE)cv->handle, (SRWLOCK *)&mutex->handle, INFINITE, 0);
#if RTC
atomic_dec_eval64(&cv->num_sleepers);
atomic_i64_dec_eval(&cv->num_sleepers);
#endif
}
@ -1323,12 +1325,12 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *cv, struct
{
__prof;
#if RTC
atomic_inc_eval64(&cv->num_sleepers);
atomic_i64_inc_eval(&cv->num_sleepers);
#endif
u32 ms = (u32)math_round((f32)seconds * 1000.f);
SleepConditionVariableSRW((PCONDITION_VARIABLE)cv->handle, (SRWLOCK *)&mutex->handle, ms, 0);
#if RTC
atomic_dec_eval64(&cv->num_sleepers);
atomic_i64_dec_eval(&cv->num_sleepers);
#endif
}
@ -1358,7 +1360,7 @@ void sys_semaphore_release(struct sys_semaphore *semaphore)
CloseHandle((HANDLE)semaphore->handle);
}
void sys_semaphore_wait(struct sys_semaphore *semaphore)
void sys_semaphore_wait(struct sys_semaphore *semaphore)
{
__prof;
WaitForSingleObjectEx((HANDLE)semaphore->handle, INFINITE, FALSE);
@ -1639,9 +1641,9 @@ u32 sys_rand_u32(void)
return v;
}
/* Like sys_panic, but guaranteed to have no side-effects */
void sys_panic_raw(char *msg_cstr)
{
app_quit();
MessageBoxExA(NULL, msg_cstr, "Fatal error", MB_ICONSTOP, 0);
ASSERT(false);
sys_exit();

View File

@ -54,11 +54,13 @@ INTERNAL struct texture_task_params *texture_task_params_alloc(void)
struct texture_task_params *p = NULL;
{
sys_mutex_lock(&L.params.mutex);
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct texture_task_params);
{
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct texture_task_params);
}
}
sys_mutex_unlock(&L.params.mutex);
}
@ -68,8 +70,10 @@ INTERNAL struct texture_task_params *texture_task_params_alloc(void)
INTERNAL void texture_task_params_release(struct texture_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
p->next_free = L.params.head_free;
L.params.head_free = p;
{
p->next_free = L.params.head_free;
L.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
}

View File

@ -63,7 +63,6 @@ GLOBAL struct {
struct v2 screen_center;
struct v2 screen_cursor;
struct v2 world_cursor;
struct v2 mouse_delta;
} L = { 0 }, DEBUG_LVAR(L_user);
/* ========================== *
@ -95,18 +94,16 @@ GLOBAL READONLY enum user_bind_kind g_binds[SYS_BTN_COUNT] = {
INTERNAL struct sys_event_array pop_sys_events(struct arena *arena)
{
struct sys_event_array array = { 0 };
if (L.sys_events_arena.pos > 0) {
sys_mutex_lock(&L.sys_events_mutex);
{
struct buffer events_buff = arena_to_buffer(&L.sys_events_arena);
arena_align(arena, ALIGNOF(struct sys_event));
array.events = (struct sys_event *)arena_push_array(arena, u8, events_buff.size);
array.count = events_buff.size / sizeof(struct sys_event);
MEMCPY(array.events, events_buff.data, events_buff.size);
arena_reset(&L.sys_events_arena);
}
sys_mutex_unlock(&L.sys_events_mutex);
sys_mutex_lock(&L.sys_events_mutex);
{
struct buffer events_buff = arena_to_buffer(&L.sys_events_arena);
arena_align(arena, ALIGNOF(struct sys_event));
array.events = (struct sys_event *)arena_push_array(arena, u8, events_buff.size);
array.count = events_buff.size / sizeof(struct sys_event);
MEMCPY(array.events, events_buff.data, events_buff.size);
arena_reset(&L.sys_events_arena);
}
sys_mutex_unlock(&L.sys_events_mutex);
return array;
}
@ -448,8 +445,6 @@ INTERNAL void user_update(void)
};
}
L.mouse_delta = V2(0, 0);
for (u64 entity_index = 0; entity_index < events.count; ++entity_index) {
struct sys_event *event = &events.events[entity_index];
@ -476,11 +471,6 @@ INTERNAL void user_update(void)
L.screen_cursor = event->cursor_position;
}
/* Update mouse delta */
if (event->kind == SYS_EVENT_KIND_MOUSE_MOVE) {
L.mouse_delta = v2_add(L.mouse_delta, event->mouse_delta);
}
/* Update bind states */
if ((event->kind == SYS_EVENT_KIND_BUTTON_DOWN || event->kind == SYS_EVENT_KIND_BUTTON_UP) && !event->is_repeat) {
enum sys_btn button = event->button;
@ -779,16 +769,16 @@ INTERNAL void user_update(void)
}
}
/* Draw crosshair or enable cursor */
/* Draw crosshair or show cursor */
if (!L.debug_camera) {
struct v2 crosshair_pos = L.screen_cursor;
u32 tint = RGBA_F(1, 1, 1, 0.5);
u32 tint = RGBA_F(1, 1, 1, 1 );
struct v2 size = V2(0, 0);
struct texture *t = texture_load_async(STR("res/graphics/crosshair.ase"));
if (t) {
size = t->size;
struct xform xf = XFORM_TRS(.t = crosshair_pos, .s = t->size);
struct xform xf = XFORM_TRS(.t = crosshair_pos, .s = size);
struct quad quad = quad_mul_xform(QUAD_UNIT_SQUARE_CENTERED, xf);
draw_texture_quad(L.screen_canvas, DRAW_TEXTURE_PARAMS(.texture = t, .tint = tint), quad);
}

View File

@ -5,6 +5,7 @@
#include "string.h"
#include "memory.h"
#include "arena.h"
#include "atomic.h"
/* Utility functions and stuff that don't have a home :( */
@ -131,7 +132,7 @@ struct sync_flag {
/* TODO: Make this a rw mutex? */
struct sys_mutex mutex;
struct sys_condition_variable cv;
b32 flag;
struct atomic_i32 flag;
};
INLINE struct sync_flag sync_flag_alloc(void)
@ -153,11 +154,12 @@ INLINE void sync_flag_release(struct sync_flag *sf)
INLINE void sync_flag_set(struct sync_flag *sf)
{
__prof;
WRITE_BARRIER();
if (!sf->flag) {
if (atomic_i32_eval_compare_exchange(&sf->flag, 0, 1) == 0) {
sys_mutex_lock(&sf->mutex);
sf->flag = true;
sys_condition_variable_signal(&sf->cv);
{
atomic_i32_eval_exchange(&sf->flag, 1);
sys_condition_variable_signal(&sf->cv);
}
sys_mutex_unlock(&sf->mutex);
}
}
@ -165,13 +167,9 @@ INLINE void sync_flag_set(struct sync_flag *sf)
INLINE void sync_flag_wait(struct sync_flag *sf)
{
__prof;
READ_BARRIER();
if (!sf->flag) {
while (atomic_i32_eval(&sf->flag) == 0) {
sys_mutex_lock(&sf->mutex);
while (!sf->flag) {
sys_condition_variable_wait(&sf->cv, &sf->mutex);
}
sys_mutex_unlock(&sf->mutex);
sys_condition_variable_wait(&sf->cv, &sf->mutex);
}
}

View File

@ -110,20 +110,22 @@ void work_startup(u32 num_worker_threads)
/* Initialize threads */
{
sys_mutex_lock(&L.mutex);
struct worker *prev = NULL;
for (u32 i = 0; i < num_worker_threads; ++i) {
struct string thread_name = string_format(scratch.arena,
STR("[P0] Worker %F"),
FMT_UINT(i));
{
struct worker *prev = NULL;
for (u32 i = 0; i < num_worker_threads; ++i) {
struct string thread_name = string_format(scratch.arena,
STR("[P0] Worker %F"),
FMT_UINT(i));
struct worker *worker = arena_push_zero(&L.arena, struct worker);
worker->thread = sys_thread_init(&worker_thread_entry_point, NULL, thread_name);
if (prev) {
prev->next = worker;
} else {
L.worker_head = worker;
struct worker *worker = arena_push_zero(&L.arena, struct worker);
worker->thread = sys_thread_init(&worker_thread_entry_point, NULL, thread_name);
if (prev) {
prev->next = worker;
} else {
L.worker_head = worker;
}
prev = worker;
}
prev = worker;
}
sys_mutex_unlock(&L.mutex);
}
@ -320,12 +322,12 @@ INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct work *work
work->status = WORK_STATUS_IN_PROGRESS;
++work->workers;
/* Do task (temporarily unlock) */
sys_mutex_unlock(&L.mutex);
{
/* Do task (temporarily unlock) */
sys_mutex_unlock(&L.mutex);
task->func(task->data);
sys_mutex_lock(&L.mutex);
}
sys_mutex_lock(&L.mutex);
--work->workers;
--work->tasks_incomplete;
task_release_assume_locked(task);
@ -378,12 +380,14 @@ INTERNAL void worker_thread_entry_point(void *thread_data)
while (L.scheduled_work_head) {
/* Do work from top */
sys_mutex_lock(&L.mutex);
struct work *work = L.scheduled_work_head;
if (work) {
__profscope(work_pool_task);
--L.idle_worker_count;
work_exec_single_task_maybe_release_assume_locked((struct work *)work);
++L.idle_worker_count;
{
struct work *work = L.scheduled_work_head;
if (work) {
__profscope(work_pool_task);
--L.idle_worker_count;
work_exec_single_task_maybe_release_assume_locked((struct work *)work);
++L.idle_worker_count;
}
}
sys_mutex_unlock(&L.mutex);
}
@ -442,19 +446,20 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate
INTERNAL struct work_handle work_push_task_internal(work_task_func *func, void *data, b32 help, enum work_priority priority)
{
struct work_handle handle;
sys_mutex_lock(&L.mutex);
{
struct work_task *task = task_alloc_assume_locked();
task->data = data;
task->func = func;
struct work_task *task = task_alloc_assume_locked();
task->data = data;
task->func = func;
struct work_slate ws = {
.task_head = task,
.task_tail = task,
.num_tasks = 1
};
struct work_handle handle = work_push_from_slate_assume_locked(&ws, help, priority);
struct work_slate ws = {
.task_head = task,
.task_tail = task,
.num_tasks = 1
};
handle = work_push_from_slate_assume_locked(&ws, help, priority);
}
sys_mutex_unlock(&L.mutex);
return handle;
@ -487,11 +492,11 @@ void work_slate_push_task(struct work_slate *ws, work_task_func *func, void *dat
__prof;
struct work_task *task = NULL;
sys_mutex_lock(&L.mutex);
{
sys_mutex_lock(&L.mutex);
task = task_alloc_assume_locked();
sys_mutex_unlock(&L.mutex);
}
sys_mutex_unlock(&L.mutex);
task->data = data;
task->func = func;
@ -511,8 +516,11 @@ struct work_handle work_slate_end(struct work_slate *ws, enum work_priority prio
{
__prof;
struct work_handle handle;
sys_mutex_lock(&L.mutex);
struct work_handle handle = work_push_from_slate_assume_locked(ws, false, priority);
{
handle = work_push_from_slate_assume_locked(ws, false, priority);
}
sys_mutex_unlock(&L.mutex);
return handle;
@ -550,21 +558,21 @@ void work_wait(struct work_handle handle)
{
__prof;
sys_mutex_lock(&L.mutex);
struct work *work = work_from_handle_assume_locked(handle);
if (work) {
/* Help with tasks */
work_exec_remaining_tasks_maybe_release_assume_locked(work);
/* Wait for work completion */
work = work_from_handle_assume_locked(handle); /* Re-checking work is sitll valid here in case work_do caused work to release */
{
struct work *work = work_from_handle_assume_locked(handle);
if (work) {
while (work->status != WORK_STATUS_DONE) {
sys_condition_variable_wait(&work->condition_variable_finished, &L.mutex);
/* Help with tasks */
work_exec_remaining_tasks_maybe_release_assume_locked(work);
/* Wait for work completion */
work = work_from_handle_assume_locked(handle); /* Re-checking work is sitll valid here in case work_do caused work to release */
if (work) {
while (work->status != WORK_STATUS_DONE) {
sys_condition_variable_wait(&work->condition_variable_finished, &L.mutex);
}
}
}
}
sys_mutex_unlock(&L.mutex);
}