Add snc functions to temporarily replace the old sys mutex and condition variable

This commit is contained in:
jacob 2025-07-06 15:32:05 -05:00
parent 32b82f4417
commit 8c080477a6
3 changed files with 140 additions and 191 deletions

100
src/snc.c Normal file
View File

@ -0,0 +1,100 @@
#include "snc.h"
#include "atomic.h"
#include "sys.h"
#include "memory.h"
/* ========================== *
* Mutex
* ========================== */
/* Acquire M exclusively, blocking until no other lock (shared or
 * exclusive) is held.
 *
 * Lock word layout (m->v):
 *   bit 31     - exclusive lock held
 *   bit 30     - waiter pending
 *   bits 0..29 - shared lock count
 */
struct snc_lock snc_lock_e(struct snc_mutex *m)
{
	__prof;
	b32 locked = false;
	while (!locked) {
		/* Try to move the lock word from 0 (unlocked) to the exclusive
		 * bit.  atomic_i32_fetch_test_set(p, expected, desired) returns
		 * the previous value — consistent with the (v, v + 1) call in
		 * snc_lock_s. */
		i32 v = atomic_i32_fetch_test_set(&m->v, 0, (1 << 31));
		if (v == 0) {
			locked = true;
		} else {
			/* Mark a waiter as pending so the value we sleep on
			 * reflects our presence. */
			if ((v & (1 << 30)) == 0) {
				/* BUG FIX: expected/desired were swapped — the old
				 * call CASed from v|(1<<30) down to v, i.e. it
				 * CLEARED the pending bit instead of setting it. */
				i32 old = atomic_i32_fetch_test_set(&m->v, v, v | (1 << 30));
				while (old != v && (old & (1 << 30)) == 0) {
					v = old;
					old = atomic_i32_fetch_test_set(&m->v, v, v | (1 << 30));
				}
				/* If we set the bit ourselves, the current word is
				 * v|(1<<30); otherwise another thread set it and
				 * old already carries it.  Either way v must match
				 * the live value so sys_wait actually sleeps. */
				v = (old == v) ? (v | (1 << 30)) : old;
			}
			/* Sleep until the 4-byte lock word changes from v. */
			sys_wait(&m->v, &v, 4);
		}
	}
	struct snc_lock lock = ZI;
	lock.exclusive = true;
	lock.mutex = m;
	return lock;
}
/* Acquire M in shared (reader) mode: increment the shared count while
 * the exclusive (bit 31) and pending (bit 30) bits are both clear. */
struct snc_lock snc_lock_s(struct snc_mutex *m)
{
	__prof;
	b32 locked = false;
	while (!locked) {
		i32 v = atomic_i32_fetch(&m->v);
		/* 0xC0000000 = exclusive | pending; shared acquisition is only
		 * allowed while both are clear. */
		while (!locked && (v & 0xC0000000) == 0) {
			/* Increment shared lock count */
			i32 old = atomic_i32_fetch_test_set(&m->v, v, v + 1);
			if (v == old) {
				locked = true;
			} else {
				/* BUG FIX: refresh v on CAS failure.  The old code
				 * retried with the stale value and could spin
				 * forever without ever re-reading m->v. */
				v = old;
			}
		}
		if (!locked) {
			/* An exclusive holder or pending waiter blocks us; sleep
			 * until the lock word changes from v. */
			sys_wait(&m->v, &v, 4);
		}
	}
	struct snc_lock lock = ZI;
	lock.mutex = m;
	return lock;
}
/* Release L (shared or exclusive) and wake every thread parked on the
 * lock word.  The handle is zeroed afterwards so stale reuse trips the
 * RTC assertion macros. */
void snc_unlock(struct snc_lock *l)
{
	__prof;
	struct snc_mutex *mutex = l->mutex;
	if (!l->exclusive) {
		/* Drop one shared reference. */
		atomic_i32_fetch_add(&mutex->v, -1);
	} else {
		/* Clear the exclusive bit (and any pending flag) in one store. */
		atomic_i32_fetch_set(&mutex->v, 0);
	}
	sys_wake_all(&mutex->v);
	MEMZERO_STRUCT(l);
}
/* ========================== *
* Condition variable
* ========================== */
void snc_cv_wait(struct snc_cv *cv, struct snc_lock *l)
{
u64 old_wake_gen = atomic_u64_fetch(&cv->wake_gen);
u64 wake_gen = old_wake_gen;
{
snc_unlock(l);
do {
sys_wait(&cv->wake_gen, &old_wake_gen, sizeof(old_wake_gen));
wake_gen = atomic_u64_fetch(&cv->wake_gen);
} while (wake_gen == old_wake_gen);
sys_wake_all(&cv->wake_gen);
if (l->exclusive) {
*l= snc_lock_e(l->mutex);
} else {
*l= snc_lock_s(l->mutex);
}
}
}
/* Wake all threads blocked in snc_cv_wait(): bump the generation
 * counter first so sleepers observe a changed value, then wake
 * everyone parked on it.  This order is essential — waking before the
 * increment would let waiters re-check and go back to sleep. */
void snc_cv_broadcast(struct snc_cv *cv)
{
	atomic_u64_fetch_add_u64(&cv->wake_gen, 1);
	sys_wake_all(&cv->wake_gen);
}

40
src/snc.h Normal file
View File

@ -0,0 +1,40 @@
#ifndef SNC_H
#define SNC_H
/* snc: lightweight futex-style synchronization primitives replacing
 * the old sys_mutex / sys_condition_variable wrappers.
 * NOTE(review): struct snc_mutex embeds struct atomic_i32 BY VALUE, so
 * atomic.h (and the b32/u64 typedefs) must already be in scope when
 * this header is included — snc.c includes "snc.h" before "atomic.h";
 * confirm the include order actually compiles. */
/* ========================== *
 * Mutex
 * ========================== */
/* Handle returned by the lock functions; pass back to snc_unlock().
 * Zeroed on unlock so stale handles fail the RTC asserts below. */
struct snc_lock {
	struct snc_mutex *mutex;  /* mutex this handle locked */
	b32 exclusive;            /* true = exclusive, false = shared */
};
/* Lock word: bit 31 = exclusive held, bit 30 = waiter pending,
 * low bits = shared reader count.  Zero means unlocked. */
struct snc_mutex {
	struct atomic_i32 v;
};
struct snc_lock snc_lock_e(struct snc_mutex *m);  /* exclusive (writer) */
struct snc_lock snc_lock_s(struct snc_mutex *m);  /* shared (reader) */
void snc_unlock(struct snc_lock *lock);
#if RTC
/* Runtime-checked builds: assert the handle actually locks M. */
# define snc_assert_locked_e(l, m) ASSERT((l)->mutex == (m) && (l)->exclusive == true)
# define snc_assert_locked_e_or_s(l, m) ASSERT((l)->mutex == (m))
#else
/* Release builds: consume l to silence unused warnings; m is dropped.
 * NOTE(review): assumes the project defines UNUSED as a cast-to-void
 * style type — confirm, otherwise (void)(l) is the portable form. */
# define snc_assert_locked_e(l, m) (UNUSED)l
# define snc_assert_locked_e_or_s(l, m) (UNUSED)l
#endif
/* ========================== *
 * Condition variable
 * ========================== */
/* Generation counter bumped by broadcast; waiters sleep until it
 * moves past their snapshot. */
struct snc_cv {
	struct atomic_u64 wake_gen;
};
void snc_cv_wait(struct snc_cv *cv, struct snc_lock *lock);
void snc_cv_broadcast(struct snc_cv *cv);
#endif

View File

@ -2509,196 +2509,6 @@ void sys_window_cursor_disable_clip(struct sys_window *sys_window)
win32_window_wake(window);
}
/* ========================== *
* Mutex
* ========================== */
/* Reset a mutex to the unlocked (all-zero) state. */
INTERNAL void win32_mutex_init(struct win32_mutex *m)
{
	MEMZERO_STRUCT(m);
}
/* Allocate a mutex: pop one from the global free list if available,
 * otherwise push a new slot onto the mutex arena.  The free list is
 * itself protected by G.mutexes_mutex. */
struct sys_mutex *sys_mutex_alloc(void)
{
	__prof;
	struct win32_mutex *m = NULL;
	{
		struct sys_lock lock = sys_mutex_lock_e(G.mutexes_mutex);
		if (G.first_free_mutex) {
			/* Reuse a previously released mutex. */
			m = G.first_free_mutex;
			G.first_free_mutex = m->next_free;
		} else {
			/* no_zero is fine: win32_mutex_init() zeroes it below. */
			m = arena_push_no_zero(G.mutexes_arena, struct win32_mutex);
		}
		sys_mutex_unlock(&lock);
	}
	win32_mutex_init(m);
	return (struct sys_mutex *)m;
}
/* Return a mutex to the global free list for later reuse; the backing
 * memory is arena-owned and never actually freed. */
void sys_mutex_release(struct sys_mutex *mutex)
{
	__prof;
	struct win32_mutex *m = (struct win32_mutex *)mutex;
	{
		struct sys_lock lock = sys_mutex_lock_e(G.mutexes_mutex);
		m->next_free = G.first_free_mutex;
		G.first_free_mutex = m;
		sys_mutex_unlock(&lock);
	}
}
/* Acquire MUTEX exclusively: CAS the lock word from 0 (unlocked) to
 * bit 31, sleeping on the word whenever it is contended. */
struct sys_lock sys_mutex_lock_e(struct sys_mutex *mutex)
{
	__prof;
	struct win32_mutex *m = (struct win32_mutex *)mutex;
	b32 locked = false;
	while (!locked) {
		/* CAS 0 -> 1<<31; returns the previous value. */
		i32 v = atomic_i32_fetch_test_set(&m->v, 0, (1 << 31));
		if (v == 0) {
			locked = true;
		} else {
			/* Set pending (bit 30) so the value we sleep on reflects
			 * a waiter's presence. */
			if ((v & (1 << 30)) == 0) {
				/* NOTE(review): given the (expected, desired) order
				 * implied by the (v, v + 1) call in the shared path,
				 * these arguments look swapped — as written this
				 * CASes from v|(1<<30) to v, clearing the bit.
				 * Verify atomic_i32_fetch_test_set's signature. */
				i32 old = atomic_i32_fetch_test_set(&m->v, v | (1 << 30), v);
				while (old != v && (old & (1 << 30)) == 0) {
					v = old;
					old = atomic_i32_fetch_test_set(&m->v, v | (1 << 30), v);
				}
				v = old;
			}
			/* Wait for the 4-byte lock word to change from v. */
			sys_wait(&m->v, &v, 4);
		}
	}
	struct sys_lock lock = ZI;
	lock.exclusive = true;
	lock.mutex = mutex;
	return lock;
}
/* Acquire MUTEX in shared (reader) mode: increment the shared count
 * while the exclusive (bit 31) and pending (bit 30) bits are clear. */
struct sys_lock sys_mutex_lock_s(struct sys_mutex *mutex)
{
	__prof;
	struct win32_mutex *m = (struct win32_mutex *)mutex;
	b32 locked = false;
	while (!locked) {
		i32 v = atomic_i32_fetch(&m->v);
		/* 0xC0000000 = exclusive | pending. */
		while (!locked && (v & 0xC0000000) == 0) {
			/* Increment shared lock count.
			 * NOTE(review): on CAS failure v is never refreshed from
			 * old, so this inner loop can spin on a stale expected
			 * value until m->v happens to return to v. */
			i32 old = atomic_i32_fetch_test_set(&m->v, v, v + 1);
			if (v == old) {
				locked = true;
			}
		}
		if (!locked) {
			/* Blocked by a writer/waiter; sleep until the word moves. */
			sys_wait(&m->v, &v, 4);
		}
	}
	struct sys_lock lock = ZI;
	lock.mutex = mutex;
	return lock;
}
/* Release LOCK (shared or exclusive), wake every thread parked on the
 * lock word, and zero the handle so stale reuse is detectable. */
void sys_mutex_unlock(struct sys_lock *lock)
{
	__prof;
	struct win32_mutex *m = (struct win32_mutex *)lock->mutex;
	if (lock->exclusive) {
		/* Clear the exclusive bit (and any pending flag). */
		atomic_i32_fetch_set(&m->v, 0);
	} else {
		/* Drop one shared reference. */
		atomic_i32_fetch_add(&m->v, -1);
	}
	sys_wake_all(&m->v);
	MEMZERO_STRUCT(lock);
}
#if RTC
/* Runtime-checked builds only: assert that LOCK is an exclusive lock
 * on MUTEX. */
void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex)
{
	ASSERT(lock->mutex == mutex);
	ASSERT(lock->exclusive == true);
}
/* Assert that LOCK holds MUTEX in either mode. */
void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex)
{
	ASSERT(lock->mutex == mutex);
}
#endif
/* ========================== *
* Condition variable
* ========================== */
/* Allocate a condition variable: reuse a free-listed one if possible,
 * otherwise push a new slot onto the CV arena; returned zeroed. */
INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void)
{
	__prof;
	struct win32_condition_variable *cv = NULL;
	{
		struct sys_lock lock = sys_mutex_lock_e(G.condition_variables_mutex);
		if (G.first_free_condition_variable) {
			/* Reuse a previously released CV. */
			cv = G.first_free_condition_variable;
			G.first_free_condition_variable = cv->next_free;
		} else {
			/* no_zero is fine: MEMZERO_STRUCT below initializes it. */
			cv = arena_push_no_zero(G.condition_variables_arena, struct win32_condition_variable);
		}
		sys_mutex_unlock(&lock);
	}
	MEMZERO_STRUCT(cv);
	return cv;
}
/* Return a condition variable to the global free list for reuse. */
INTERNAL void win32_condition_variable_release(struct win32_condition_variable *w32cv)
{
	__prof;
	struct sys_lock lock = sys_mutex_lock_e(G.condition_variables_mutex);
	w32cv->next_free = G.first_free_condition_variable;
	G.first_free_condition_variable = w32cv;
	sys_mutex_unlock(&lock);
}
/* Public wrapper: allocate a CV via the win32 pool. */
struct sys_condition_variable *sys_condition_variable_alloc(void)
{
	__prof;
	return (struct sys_condition_variable *)win32_condition_variable_alloc();
}
/* Public wrapper: return a CV to the win32 pool. */
void sys_condition_variable_release(struct sys_condition_variable *sys_cv)
{
	__prof;
	struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv;
	win32_condition_variable_release(cv);
}
/* Atomically release LOCK, sleep until the CV's generation counter
 * changes (broadcast), then re-acquire the lock in its original mode.
 * The mutex pointer and exclusive flag are captured BEFORE unlocking,
 * because sys_mutex_unlock() zeroes *lock. */
void sys_condition_variable_wait(struct sys_condition_variable *sys_cv, struct sys_lock *lock)
{
	struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv;
	struct sys_mutex *mutex = lock->mutex;
	b32 exclusive = lock->exclusive;
	u64 old_wake_gen = atomic_u64_fetch(&cv->wake_gen);
	u64 wake_gen = old_wake_gen;
	{
		sys_mutex_unlock(lock);
		do {
			/* Sleep while wake_gen still equals our snapshot. */
			sys_wait(&cv->wake_gen, &old_wake_gen, sizeof(old_wake_gen));
			wake_gen = atomic_u64_fetch(&cv->wake_gen);
		} while (wake_gen == old_wake_gen);
		/* Chain the wake so other sleepers also see the new gen. */
		sys_wake_all(&cv->wake_gen);
		if (exclusive) {
			*lock = sys_mutex_lock_e(mutex);
		} else {
			*lock = sys_mutex_lock_s(mutex);
		}
	}
}
/* Wake all CV waiters: bump the generation counter first (so sleepers
 * observe a changed value), then wake everyone parked on it. */
void sys_condition_variable_broadcast(struct sys_condition_variable *sys_cv)
{
	struct win32_condition_variable *cv = (struct win32_condition_variable *)sys_cv;
	atomic_u64_fetch_add_u64(&cv->wake_gen, 1);
	sys_wake_all(&cv->wake_gen);
}
/* ========================== *
* Threads
* ========================== */
@ -3255,7 +3065,6 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Setup mutexes */
G.mutexes_arena = arena_alloc(GIGABYTE(64));
struct win32_mutex *first_mutex = arena_push(G.mutexes_arena, struct win32_mutex);
win32_mutex_init(first_mutex);
G.mutexes_mutex = (struct sys_mutex *)first_mutex;
/* Set up condition variables */