195 lines
4.2 KiB
C
195 lines
4.2 KiB
C
////////////////////////////////
|
|
//~ Mutex
|
|
|
|
//- Exclusive mutex lock
|
|
P_Lock P_LockSpinE(P_Mutex *m, i32 spin)
{
    // Acquire m exclusively, spinning up to `spin` iterations between sleeps.
    //
    // m->v bit layout (inferred from the masks below — confirm against the
    // P_Mutex declaration):
    //   bit 31 (0x80000000) : exclusive lock held
    //   bit 30 (0x40000000) : exclusive waiter pending
    //   bits 0-29           : shared (reader) lock count
    b32 locked = 0;
    i32 spin_cnt = 0;
    while (!locked)
    {
        ++spin_cnt;
        // Fast path: CAS 0 -> exclusive; returns the value observed before the swap.
        u32 v = Atomic32FetchTestSet(&m->v, 0, 0x80000000);
        if (v == 0)
        {
            locked = 1;
        }
        else if (v == 0x40000000)
        {
            /* Lock has pending bit set, try to lock */
            // CAS pending -> exclusive (this clears the pending bit on success).
            u32 swp = Atomic32FetchTestSet(&m->v, v, 0x80000000);
            // NOTE(review): with v fixed at 0x40000000 the condition
            // (swp != v && swp == 0x40000000) is unsatisfiable, so this loop
            // never iterates; kept byte-identical as harmless dead code.
            while (swp != v && swp == 0x40000000)
            {
                v = swp;
                swp = Atomic32FetchTestSet(&m->v, v, 0x80000000);
            }
            v = swp;
            if (v == 0x40000000)
            {
                // CAS observed the expected value, so 0x80000000 was
                // installed and we now own the lock.
                locked = 1;
            }
        }
        if (!locked && (v & 0xC0000000) == 0)
        {
            /* Lock has shared lockers and no pending waiter, set pending bit */
            u32 swp = Atomic32FetchTestSet(&m->v, v, v | 0x40000000);
            // Chase the shared count while other threads change it; bail out
            // if the lock empties (swp == 0) or gains a high bit, and let the
            // outer loop deal with the new state.
            while (swp != v && (swp & 0xC0000000) == 0 && swp != 0)
            {
                v = swp;
                swp = Atomic32FetchTestSet(&m->v, v, v | 0x40000000);
            }
            v = swp;
        }
        /* Pause or wait */
        // v == 0 and v == 0x40000000 are immediately-acquirable states, so
        // retry at once; anything else means sharers or another exclusive
        // owner are in the way.
        if (!locked && v != 0 && v != 0x40000000)
        {
            if (spin_cnt < spin)
            {
                IxPause();
            }
            else
            {
                // Futex-style sleep: block until the 4 bytes at &m->v no
                // longer equal v (no timeout), then resume spinning.
                P_Wait(&m->v, &v, 4, I64Max);
                spin_cnt = 0;
            }
        }
    }

#if RtcIsEnabled
    // Record the owning fiber for runtime-check builds.
    Atomic32FetchSet(&m->exclusive_fiber_id, FiberId());
#endif

    P_Lock lock = ZI;
    lock.exclusive = 1;
    lock.mutex = m;
    return lock;
}
|
|
|
|
//- Shared mutex lock
|
|
P_Lock P_LockSpinS(P_Mutex *m, i32 spin)
{
    // Acquire m in shared (reader) mode, spinning up to `spin` iterations
    // between sleeps.  A shared acquire increments the low-bit count in m->v
    // and is refused while either high bit is set (0x80000000 exclusive held,
    // 0x40000000 exclusive waiter pending) — pending waiters thus block new
    // readers, preventing writer starvation.
    b32 locked = 0;
    i32 spin_cnt = 0;
    while (!locked)
    {
        ++spin_cnt;
        u32 v = Atomic32Fetch(&m->v);
        while (!locked && (v & 0xC0000000) == 0)
        {
            /* Lock has no exclusive or pending exclusive lock, increment shared count */
            u32 swp = Atomic32FetchTestSet(&m->v, v, v + 1);
            if (v == swp)
            {
                locked = 1;
            }
            else
            {
                // CAS lost a race; retry with the value it observed.
                v = swp;
            }
        }
        /* Pause or wait */
        if (!locked)
        {
            if (spin_cnt < spin)
            {
                IxPause();
            }
            else
            {
                // Sleep until the 4 bytes at &m->v change from the last
                // observed v, then resume spinning.
                P_Wait(&m->v, &v, 4, I64Max);
                spin_cnt = 0;
            }
        }
    }
    // Shared locks leave lock.exclusive zero (ZI zero-initializes).
    P_Lock lock = ZI;
    lock.mutex = m;
    return lock;
}
|
|
|
|
//- Mutex lock wrappers
|
|
P_Lock P_LockE(P_Mutex *m)
{
    // Exclusive lock with the module's default spin count.
    i32 default_spin = P_DefaultMutexSpin;
    return P_LockSpinE(m, default_spin);
}
|
|
|
|
P_Lock P_LockS(P_Mutex *m)
{
    // Shared lock with the module's default spin count.
    i32 default_spin = P_DefaultMutexSpin;
    return P_LockSpinS(m, default_spin);
}
|
|
|
|
void P_Unlock(P_Lock *l)
{
    // Release the lock described by *l (exclusive or shared), wake waiters,
    // and zero *l so the handle cannot be released twice.
    P_Mutex *m = l->mutex;
    if (l->exclusive)
    {
#if RtcIsEnabled
        // Clear the owner record before releasing, so RTC builds never see
        // an unlocked mutex still attributed to this fiber.
        Atomic32FetchSet(&m->exclusive_fiber_id, 0);
#endif
        // Drop the exclusive bit (and any pending bit) in a single store.
        Atomic32FetchSet(&m->v, 0);
    }
    else
    {
        // Decrement the shared-reader count.
        Atomic32FetchAdd(&m->v, -1);
    }
    // Wake every waiter unconditionally — even when sharers remain, a
    // pending exclusive waiter re-checks the state itself after waking.
    P_Wake(&m->v, I32Max);
    ZeroStruct(l);
}
|
|
|
|
////////////////////////////////
|
|
//~ Condition variable
|
|
|
|
void P_WaitOnCv(P_Cv *cv, P_Lock *l)
{
    // Convenience wrapper: wait on cv with no timeout.
    P_WaitOnCvTime(cv, l, I64Max);
}
|
|
|
|
void P_WaitOnCvTime(P_Cv *cv, P_Lock *l, i64 timeout_ns)
{
    // Atomically release l and wait for a signal on cv (or the timeout),
    // then reacquire l in the mode the caller originally held.
    //
    // The generation counter is snapshotted *before* the unlock: a signal
    // that lands between the unlock and the wait bumps wake_gen, so the
    // subsequent P_Wait returns immediately instead of missing the wakeup.
    u64 observed_gen = Atomic64Fetch(&cv->wake_gen);
    P_Mutex *mutex = l->mutex;
    b32 was_exclusive = l->exclusive;

    P_Unlock(l);
    P_Wait(&cv->wake_gen, &observed_gen, sizeof(observed_gen), timeout_ns);

    // Reacquire in the same mode before returning to the caller.
    *l = was_exclusive ? P_LockE(mutex) : P_LockS(mutex);
}
|
|
|
|
void P_SignalCv(P_Cv *cv, i32 count)
{
    // Wake up to `count` waiters on cv.  The generation counter must be
    // bumped before P_Wake: a waiter comparing against its snapshotted
    // wake_gen then sees a changed value and will not go back to sleep on
    // the stale generation.
    Atomic64FetchAdd(&cv->wake_gen, 1);
    P_Wake(&cv->wake_gen, count);
}
|
|
|
|
////////////////////////////////
|
|
//~ Counter
|
|
|
|
void P_CounterAdd(P_Counter *counter, i64 x)
{
    // Adjust the counter by x (x may be negative).  Waiters are released
    // only when the value transitions from positive to non-positive.
    i64 before = Atomic64FetchAdd(&counter->v, x);
    i64 after = before + x;
    b32 crossed_to_zero = (before > 0 && after <= 0);
    if (crossed_to_zero)
    {
        P_Wake(&counter->v, I32Max);
    }
}
|
|
|
|
void P_WaitOnCounter(P_Counter *counter)
{
    // Block until the counter drops to zero or below.  P_Wait returns when
    // the stored value no longer matches v (or spuriously); each wakeup
    // re-reads the counter and loops while it is still positive.
    for (i64 v = Atomic64Fetch(&counter->v); v > 0; v = Atomic64Fetch(&counter->v))
    {
        P_Wait(&counter->v, &v, sizeof(v), I64Max);
    }
}
|