////////////////////////////////
//~ Atomic types
/* NOTE: Must be aligned to 32 bit boundary by user */
/* 8-bit atomic cell. Do not read or write _v directly; use the Atomic8*
   operations below. `volatile` only stops the compiler from caching the
   value — atomicity and ordering come from the interlocked intrinsics. */
Struct(Atomic8)
{
    volatile i8 _v;
};
/* NOTE: Must be aligned to 32 bit boundary by user */
/* 16-bit atomic cell. Access only through the Atomic16* operations below;
   `volatile` prevents compiler caching, the intrinsics provide atomicity. */
Struct(Atomic16)
{
    volatile i16 _v;
};
/* 32-bit atomic cell. No user-alignment note here (unlike Atomic8/16):
   presumably i32's natural alignment already satisfies the interlocked
   requirement — confirm against the project's type definitions. */
Struct(Atomic32)
{
    volatile i32 _v;
};
/* 64-bit atomic cell. NOTE(review): 64-bit interlocked ops require 8-byte
   alignment — presumably guaranteed by i64's natural alignment; verify on
   all supported targets. */
Struct(Atomic64)
{
    volatile i64 _v;
};
////////////////////////////////
//~ Cache-line isolated atomic types
AlignedStruct(Atomic8Padded, 64)
|
|
{
|
|
Atomic8 v;
|
|
u8 _pad[60];
|
|
};
|
|
StaticAssert(sizeof(Atomic8Padded) == 64 && alignof(Atomic8Padded) == 64);
|
|
|
|
AlignedStruct(Atomic16Padded, 64)
|
|
{
|
|
Atomic16 v;
|
|
u8 _pad[60];
|
|
};
|
|
StaticAssert(sizeof(Atomic16Padded) == 64 && alignof(Atomic16Padded) == 64);
|
|
|
|
/* Atomic32 padded out to one 64-byte cache line to prevent false sharing. */
AlignedStruct(Atomic32Padded, 64)
{
    Atomic32 v;
    u8 _pad[60]; /* sizeof(Atomic32) == 4; 4 + 60 == 64, pad fills the line exactly */
};
StaticAssert(sizeof(Atomic32Padded) == 64 && alignof(Atomic32Padded) == 64);
/* Atomic64 padded out to one 64-byte cache line to prevent false sharing. */
AlignedStruct(Atomic64Padded, 64)
{
    Atomic64 v;
    u8 _pad[56]; /* sizeof(Atomic64) == 8; 8 + 56 == 64, pad fills the line exactly */
};
StaticAssert(sizeof(Atomic64Padded) == 64 && alignof(Atomic64Padded) == 64);
////////////////////////////////
//~ Atomic operations
#if PlatformIsWindows

/* 8-bit atomic operations via MSVC interlocked intrinsics (each is a full
   memory barrier). All return the value observed in *x BEFORE the operation.
   Fetch is a no-op compare-exchange (swap 0 for 0): an atomic read with
   barrier semantics. The intrinsics take char pointers, hence the casts. */
ForceInline i8 Atomic8Fetch(Atomic8 *x) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, 0, 0); }
ForceInline i8 Atomic8FetchSet(Atomic8 *x, i8 e) { return (i8)_InterlockedExchange8((char *)&x->_v, e); }
/* Compare-and-swap: stores e only if the current value equals c. */
ForceInline i8 Atomic8FetchTestSet(Atomic8 *x, i8 c, i8 e) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, e, c); }
ForceInline i8 Atomic8FetchXor(Atomic8 *x, i8 c) { return (i8)_InterlockedXor8((char *)&x->_v, c); }
ForceInline i8 Atomic8FetchAdd(Atomic8 *x, i8 a) { return (i8)_InterlockedExchangeAdd8((char *)&x->_v, a); }
ForceInline i16 Atomic16Fetch(Atomic16 *x) { return (i16)_InterlockedCompareExchange16(&x->_v, 0, 0); }
|
|
ForceInline i16 Atomic16FetchSet(Atomic16 *x, i16 e) { return (i16)_InterlockedExchange16(&x->_v, e); }
|
|
ForceInline i16 Atomic16FetchTestSet(Atomic16 *x, i16 c, i16 e) { return (i16)_InterlockedCompareExchange16(&x->_v, e, c); }
|
|
ForceInline i16 Atomic16FetchTestXor(Atomic16 *x, i16 c) { return (i16)_InterlockedXor16(&x->_v, c); }
|
|
ForceInline i16 Atomic16FetchTestAdd(Atomic16 *x, i16 a) { return (i16)_InterlockedExchangeAdd16(&x->_v, a); }
|
|
|
|
/* 32-bit atomic operations via MSVC interlocked intrinsics (full barriers).
   All return the value observed in *x BEFORE the operation. The intrinsics
   take `volatile long *` (long is 32-bit on Windows), hence the casts. */
ForceInline i32 Atomic32Fetch(Atomic32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
ForceInline i32 Atomic32FetchSet(Atomic32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); }
/* Compare-and-swap: stores e only if the current value equals c. */
ForceInline i32 Atomic32FetchTestSet(Atomic32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
ForceInline i32 Atomic32FetchXor(Atomic32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v, c); }
ForceInline i32 Atomic32FetchAdd(Atomic32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); }
/* 64-bit atomic operations via MSVC interlocked intrinsics (full barriers).
   All return the value observed in *x BEFORE the operation.
   NOTE(review): _InterlockedExchange64 / _InterlockedXor64 /
   _InterlockedExchangeAdd64 are x64-only intrinsics; on 32-bit x86 only the
   compare-exchange form exists — confirm this header targets 64-bit only. */
ForceInline i64 Atomic64Fetch(Atomic64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); }
ForceInline i64 Atomic64FetchSet(Atomic64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); }
/* Compare-and-swap: stores e only if the current value equals c. */
ForceInline i64 Atomic64FetchTestSet(Atomic64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); }
ForceInline i64 Atomic64FetchXor(Atomic64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); }
ForceInline i64 Atomic64FetchAdd(Atomic64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); }

#else
# error Atomics not implemented
#endif