power_play/src/snc.c

174 lines
4.4 KiB
C

#include "snc.h"
#include "atomic.h"
#include "sys.h"
#include "memory.h"
#include "intrinsics.h"
#define DEFAULT_MUTEX_SPIN 4000
/* ========================== *
* Mutex
* ========================== */
/* Acquire m exclusively, spinning up to `spin` iterations between sleeps.
 * Bit layout of m->v (inferred from the masks used below — confirm in snc.h):
 *   0x80000000  exclusive-lock bit
 *   0x40000000  pending-exclusive bit (blocks new shared lockers)
 *   low bits    shared-locker count
 * Returns a lock handle with `exclusive` set; release with snc_unlock(). */
struct snc_lock snc_lock_spin_e(struct snc_mutex *m, i32 spin)
{
	b32 locked = false;
	i32 spin_cnt = 0;
	while (!locked) {
		++spin_cnt;
		/* Fast path: fully free (0) -> exclusive. */
		u32 v = atomic_u32_fetch_test_set(&m->v, 0, 0x80000000);
		if (v == 0) {
			locked = true;
		} else if (v == 0x40000000) {
			/* Pending bit set and no sharers left: claim the lock.
			 * (The original retry loop here was unreachable: with
			 * v == 0x40000000, "swp != v && swp == 0x40000000"
			 * can never hold, so it has been removed.) */
			u32 swp = atomic_u32_fetch_test_set(&m->v, v, 0x80000000);
			v = swp;
			if (v == 0x40000000) {
				locked = true;
			}
		}
		if (!locked && (v & 0xC0000000) == 0) {
			/* Only shared lockers and no pending waiter: set the
			 * pending bit so new sharers stop entering.  Retry as
			 * long as the word keeps that shape (nonzero shared
			 * count, no high bits). */
			u32 swp = atomic_u32_fetch_test_set(&m->v, v, v | 0x40000000);
			while (swp != v && (swp & 0xC0000000) == 0 && swp != 0) {
				v = swp;
				swp = atomic_u32_fetch_test_set(&m->v, v, v | 0x40000000);
			}
			v = swp;
		}
		/* Pause or sleep; skip when v is 0 or bare-pending, because
		 * those states are immediately acquirable on the next pass. */
		if (!locked && v != 0 && v != 0x40000000) {
			if (spin_cnt < spin) {
				ix_pause();
			} else {
				/* sizeof(v) (== 4) instead of a literal, for
				 * consistency with the file's other sys_wait
				 * call sites. */
				sys_wait(&m->v, &v, sizeof(v), I64_MAX);
				spin_cnt = 0;
			}
		}
	}
#if RTC
	/* Record the owner for runtime-check builds. */
	atomic_i32_fetch_set(&m->exclusive_fiber_id, sys_current_fiber_id());
#endif
	struct snc_lock lock = ZI;
	lock.exclusive = true;
	lock.mutex = m;
	return lock;
}
/* Acquire m shared (read) by incrementing the low-bit sharer count in
 * m->v, spinning up to `spin` iterations between sleeps.  Acquisition is
 * refused while either high bit (exclusive 0x80000000 or pending
 * 0x40000000) is set, which lets a waiting exclusive locker drain the
 * sharers.  Returns a non-exclusive lock handle; release with
 * snc_unlock(). */
struct snc_lock snc_lock_spin_s(struct snc_mutex *m, i32 spin)
{
	b32 locked = false;
	i32 spin_cnt = 0;
	while (!locked) {
		++spin_cnt;
		u32 v = atomic_u32_fetch(&m->v);
		while (!locked && (v & 0xC0000000) == 0) {
			/* No exclusive or pending-exclusive bit: try to bump
			 * the shared count; on CAS failure retry with the
			 * freshly observed value. */
			u32 swp = atomic_u32_fetch_test_set(&m->v, v, v + 1);
			if (v == swp) {
				locked = true;
			} else {
				v = swp;
			}
		}
		/* Pause or wait for the word to change. */
		if (!locked) {
			if (spin_cnt < spin) {
				ix_pause();
			} else {
				/* sizeof(v) (== 4) instead of a literal, for
				 * consistency with the file's other sys_wait
				 * call sites. */
				sys_wait(&m->v, &v, sizeof(v), I64_MAX);
				spin_cnt = 0;
			}
		}
	}
	struct snc_lock lock = ZI;
	lock.mutex = m;
	return lock;
}
struct snc_lock snc_lock_e(struct snc_mutex *m)
{
return snc_lock_spin_e(m, DEFAULT_MUTEX_SPIN);
}
struct snc_lock snc_lock_s(struct snc_mutex *m)
{
return snc_lock_spin_s(m, DEFAULT_MUTEX_SPIN);
}
/* Release the lock described by l and wake all threads blocked on the
 * mutex word.  Exclusive release clears the whole word (exclusive bit,
 * pending bit, and shared count all go to 0); shared release decrements
 * the sharer count by one.  The unconditional wake-all is conservative:
 * woken waiters re-check the value and sleep again if it still blocks
 * them.  The handle is zeroed afterwards so accidental reuse is visible. */
void snc_unlock(struct snc_lock *l)
{
struct snc_mutex *m = l->mutex;
if (l->exclusive) {
#if RTC
/* Clear the recorded owner before releasing (runtime-check builds). */
atomic_i32_fetch_set(&m->exclusive_fiber_id, 0);
#endif
atomic_u32_fetch_set(&m->v, 0);
} else {
/* Drop one shared reference. */
atomic_u32_fetch_add_i32(&m->v, -1);
}
sys_wake_all(&m->v);
MEMZERO_STRUCT(l);
}
/* ========================== *
* Condition variable
* ========================== */
/* Block on cv until the next broadcast, releasing and reacquiring l
 * around the wait.  Equivalent to snc_cv_wait_time with no timeout. */
void snc_cv_wait(struct snc_cv *cv, struct snc_lock *l)
{
snc_cv_wait_time(cv, l, I64_MAX);
}
/* Block on cv for at most timeout_ns, releasing l for the duration and
 * reacquiring it (in the same exclusive/shared mode) before returning.
 * The generation counter is snapshotted BEFORE the unlock, so a
 * broadcast that lands in the unlock->wait window is not lost: sys_wait
 * compares against the snapshot and returns immediately if it changed. */
void snc_cv_wait_time(struct snc_cv *cv, struct snc_lock *l, i64 timeout_ns)
{
	u64 gen = atomic_u64_fetch(&cv->wake_gen);
	struct snc_mutex *m = l->mutex;
	b32 was_exclusive = l->exclusive;
	snc_unlock(l);
	sys_wait(&cv->wake_gen, &gen, sizeof(gen), timeout_ns);
	/* Reacquire in the mode the caller originally held. */
	*l = was_exclusive ? snc_lock_e(m) : snc_lock_s(m);
}
/* Wake every waiter blocked in snc_cv_wait / snc_cv_wait_time.  The
 * generation counter is bumped BEFORE the wake so that a waiter whose
 * sys_wait has not started yet sees a changed value and does not block. */
void snc_cv_broadcast(struct snc_cv *cv)
{
atomic_u64_fetch_add_u64(&cv->wake_gen, 1);
sys_wake_all(&cv->wake_gen);
}
/* ========================== *
* Counter
* ========================== */
/* Add x (may be negative) to the counter.  Waiters are woken exactly on
 * the transition from positive to non-positive, which is the condition
 * snc_counter_wait sleeps on. */
void snc_counter_add(struct snc_counter *counter, i64 x)
{
	i64 prev = atomic_i64_fetch_add(&counter->v, x);
	if (prev > 0 && prev + x <= 0) {
		sys_wake_all(&counter->v);
	}
}
/* Block until the counter drops to zero or below.  Each wakeup re-reads
 * the value; sys_wait returns immediately if it already differs from the
 * snapshot, so no transition is missed. */
void snc_counter_wait(struct snc_counter *counter)
{
	for (;;) {
		i64 cur = atomic_i64_fetch(&counter->v);
		if (cur <= 0) {
			break;
		}
		sys_wait(&counter->v, &cur, sizeof(cur), I64_MAX);
	}
}