////////////////////////////////
//~ Mutex
//
// The mutex word m->v packs three fields:
//   bit 31 (0x80000000): exclusive lock held
//   bit 30 (0x40000000): exclusive locker pending (blocks new shared locks)
//   bits 0-29          : shared (reader) lock count
//
// NOTE(review): semantics of the primitives below are inferred from usage —
// confirm against their definitions:
//   - atomic32_fetch_test_set(addr, expected, desired) appears to be a CAS
//     that returns the previous value; the swap succeeded iff the return
//     equals `expected`.
//   - P_Wait(addr, &val, size, timeout) appears to be a futex-style wait:
//     sleeps only while *addr still equals the sampled value.

//- Exclusive mutex lock

// Acquire `m` exclusively, spinning up to `spin` iterations between sleeps.
// Returns a P_Lock token with .exclusive set; release with P_Unlock.
P_Lock P_LockSpinE(P_Mutex *m, i32 spin)
{
  b32 locked = 0;
  i32 spin_cnt = 0;
  while (!locked)
  {
    ++spin_cnt;

    // Fast path: free (0) -> exclusive. Returns the prior value either way.
    u32 v = atomic32_fetch_test_set(&m->v, 0, 0x80000000);
    if (v == 0)
    {
      locked = 1;
    }
    else if (v == 0x40000000)
    {
      /* Lock has pending bit set, try to lock */
      // Only the pending bit is set (no holders): pending-only -> exclusive.
      u32 swp = atomic32_fetch_test_set(&m->v, v, 0x80000000);
      // NOTE(review): this retry loop looks unreachable — with v fixed at
      // 0x40000000 on entry, `swp != v && swp == 0x40000000` can never be
      // true, so the body never runs. Harmless dead code, but the condition
      // was probably intended to retry some other transient state; verify.
      while (swp != v && swp == 0x40000000)
      {
        v = swp;
        swp = atomic32_fetch_test_set(&m->v, v, 0x80000000);
      }
      v = swp;
      if (v == 0x40000000)
      {
        // CAS returned the expected value, so the swap took effect.
        locked = 1;
      }
    }

    if (!locked && (v & 0xC0000000) == 0)
    {
      /* Lock has shared lockers and no pending waiter, set pending bit */
      // NOTE(review): v can also be 0 here (if the else-if branch above
      // refreshed it); setting the pending bit on a free mutex is then
      // resolved via the pending-only path on the next loop iteration.
      u32 swp = atomic32_fetch_test_set(&m->v, v, v | 0x40000000);
      while (swp != v && (swp & 0xC0000000) == 0 && swp != 0)
      {
        v = swp;
        swp = atomic32_fetch_test_set(&m->v, v, v | 0x40000000);
      }
      v = swp;
    }

    /* Pause or wait */
    // Skip the backoff when v is a state the next iteration can acquire
    // immediately (free, or pending-only).
    if (!locked && v != 0 && v != 0x40000000)
    {
      if (spin_cnt < spin)
      {
        ix_pause();
      }
      else
      {
        // Sleep until m->v changes from v; 4 == sizeof(m->v).
        P_Wait(&m->v, &v, 4, I64Max);
        spin_cnt = 0;
      }
    }
  }
#if RtcIsEnabled
  // Record the owning fiber for runtime checks.
  atomic32_fetch_set(&m->exclusive_fiber_id, FiberId());
#endif
  P_Lock lock = ZI;
  lock.exclusive = 1;
  lock.mutex = m;
  return lock;
}

//- Shared mutex lock

// Acquire `m` shared (reader): increments the shared count. Blocks while an
// exclusive lock is held OR an exclusive locker is pending — i.e. the pending
// bit gives writers priority over new readers. Release with P_Unlock.
P_Lock P_LockSpinS(P_Mutex *m, i32 spin)
{
  b32 locked = 0;
  i32 spin_cnt = 0;
  while (!locked)
  {
    ++spin_cnt;
    u32 v = atomic32_fetch(&m->v);
    while (!locked && (v & 0xC0000000) == 0)
    {
      /* Lock has no exclusive or pending exclusive lock, increment shared count */
      u32 swp = atomic32_fetch_test_set(&m->v, v, v + 1);
      if (v == swp)
      {
        locked = 1;
      }
      else
      {
        // Lost the race; retry against the freshly observed value.
        v = swp;
      }
    }
    /* Pause or wait */
    if (!locked)
    {
      if (spin_cnt < spin)
      {
        ix_pause();
      }
      else
      {
        // Sleep until m->v changes from v; 4 == sizeof(m->v).
        P_Wait(&m->v, &v, 4, I64Max);
        spin_cnt = 0;
      }
    }
  }
  P_Lock lock = ZI;
  lock.mutex = m;
  return lock;
}

//- Mutex lock wrappers

// Convenience wrappers using the default spin count.
P_Lock P_LockE(P_Mutex *m)
{
  return P_LockSpinE(m, P_DefaultMutexSpin);
}

P_Lock P_LockS(P_Mutex *m)
{
  return P_LockSpinS(m, P_DefaultMutexSpin);
}

// Release a lock taken by P_LockE/P_LockS (or the Spin variants), wake all
// waiters on the mutex word, and zero *l after release.
void P_Unlock(P_Lock *l)
{
  P_Mutex *m = l->mutex;
  if (l->exclusive)
  {
#if RtcIsEnabled
    atomic32_fetch_set(&m->exclusive_fiber_id, 0);
#endif
    // One store clears holder, pending, and shared bits; woken waiters
    // re-contend from scratch.
    atomic32_fetch_set(&m->v, 0);
  }
  else
  {
    // Drop one shared reference; a pending bit set by a writer survives.
    atomic32_fetch_add(&m->v, -1);
  }
  P_Wake(&m->v, I32Max);
  MEMZERO_STRUCT(l);
}

////////////////////////////////
//~ Condition variable

// Wait on `cv` with no timeout. `l` must be held on entry; it is re-held on
// return.
void P_WaitOnCv(P_Cv *cv, P_Lock *l)
{
  P_WaitOnCvTime(cv, l, I64Max);
}

// Wait on `cv` for at most `timeout_ns`. The generation counter is sampled
// BEFORE the unlock, so a signal arriving between the unlock and the wait is
// not lost — presumably P_Wait returns immediately once wake_gen has moved
// past the sampled value (see NOTE(review) at top of file). The lock is
// re-acquired with the same exclusive/shared mode before returning.
void P_WaitOnCvTime(P_Cv *cv, P_Lock *l, i64 timeout_ns)
{
  u64 old_wake_gen = atomic64_fetch(&cv->wake_gen);
  P_Mutex *mutex = l->mutex;
  // P_Unlock zeroes *l, so capture the mode before releasing.
  b32 exclusive = l->exclusive;
  {
    P_Unlock(l);
    {
      P_Wait(&cv->wake_gen, &old_wake_gen, sizeof(old_wake_gen), timeout_ns);
    }
    if (exclusive)
    {
      *l = P_LockE(mutex);
    }
    else
    {
      *l = P_LockS(mutex);
    }
  }
}

// Bump the generation (invalidating every sampled old_wake_gen) and wake up
// to `count` waiters.
void P_SignalCv(P_Cv *cv, i32 count)
{
  atomic64_fetch_add(&cv->wake_gen, 1);
  P_Wake(&cv->wake_gen, count);
}

////////////////////////////////
//~ Counter

// Add `x` (may be negative) to the counter. Wakes waiters only on the
// transition from positive to <= 0, which is the condition P_WaitOnCounter
// blocks on.
void P_CounterAdd(P_Counter *counter, i64 x)
{
  i64 old_v = atomic64_fetch_add(&counter->v, x);
  i64 new_v = old_v + x;
  if (old_v > 0 && new_v <= 0)
  {
    P_Wake(&counter->v, I32Max);
  }
}

// Block until the counter drops to zero or below.
void P_WaitOnCounter(P_Counter *counter)
{
  i64 v = atomic64_fetch(&counter->v);
  while (v > 0)
  {
    P_Wait(&counter->v, &v, sizeof(v), I64Max);
    // Re-read after waking; the wake may precede another increment.
    v = atomic64_fetch(&counter->v);
  }
}