atomically load wait address value
This commit is contained in:
parent
166db8575b
commit
4dab156b5f
@ -3,8 +3,6 @@
|
||||
|
||||
#if PLATFORM_WINDOWS
|
||||
|
||||
/* TODO: Remove "..._raw" functions */
|
||||
|
||||
/* Atomically load x->_v with full-barrier semantics.
 * A compare-exchange whose comparand and exchange value are both 0 never
 * modifies the target (it either "replaces" 0 with 0 or fails), but always
 * returns the value observed — a standard MSVC idiom for a sequentially
 * consistent atomic load. */
FORCE_INLINE i32 atomic_i32_fetch(struct atomic_i32 *x)
{
    volatile long *target = (volatile long *)&x->_v;
    long observed = _InterlockedCompareExchange(target, 0, 0);
    return (i32)observed;
}
|
||||
/* Atomically store e into x->_v and return the value that was there before.
 * Thin wrapper over the MSVC full-barrier exchange intrinsic. */
FORCE_INLINE i32 atomic_i32_fetch_set(struct atomic_i32 *x, i32 e)
{
    volatile long *target = (volatile long *)&x->_v;
    long previous = _InterlockedExchange(target, e);
    return (i32)previous;
}
|
||||
/* Atomic compare-and-swap: if x->_v equals c, store e; in all cases return
 * the value of x->_v observed before the operation (callers detect success
 * by comparing the return value against c). Full-barrier semantics. */
FORCE_INLINE i32 atomic_i32_fetch_test_set(struct atomic_i32 *x, i32 c, i32 e)
{
    volatile long *target = (volatile long *)&x->_v;
    long observed = _InterlockedCompareExchange(target, e, c);
    return (i32)observed;
}
|
||||
|
||||
@ -11,7 +11,10 @@ struct snc_lock {
|
||||
};
|
||||
|
||||
struct snc_mutex {
|
||||
/* Bit 31: exclusive lock held, bit 30: pending exclusive lock, bits 0-30: shared locks count */
|
||||
/* Bit 31 = Exclusive lock is held
|
||||
* Bit 30 = Exclusive lock is pending
|
||||
 * Bit 0-29 = Shared locks count (bits 30 and 31 are reserved for the flags above)
|
||||
*/
|
||||
struct atomic_u32 v;
|
||||
|
||||
#if RTC
|
||||
|
||||
@ -115,13 +115,8 @@ struct alignas(64) wait_list {
|
||||
/* =================================================== */
|
||||
struct wait_list *prev_in_bin; /* 08 bytes */
|
||||
/* =================================================== */
|
||||
u8 _pad0[8]; /* 08 bytes (padding) */
|
||||
u8 _pad0[32]; /* 32 bytes (padding) */
|
||||
/* =================================================== */
|
||||
u8 _pad1[8]; /* 08 bytes (padding) */
|
||||
/* =================================================== */
|
||||
u8 _pad2[8]; /* 08 bytes (padding) */
|
||||
/* =================================================== */
|
||||
u8 _pad3[8]; /* 08 bytes (padding) */
|
||||
};
|
||||
STATIC_ASSERT(sizeof(struct wait_list) == 64); /* Padding validation (increase if necessary) */
|
||||
STATIC_ASSERT(alignof(struct wait_list) == 64); /* Avoid false sharing */
|
||||
@ -143,19 +138,6 @@ STATIC_ASSERT(sizeof(struct wait_bin) == 64); /* Padding validation (increase i
|
||||
STATIC_ASSERT(alignof(struct wait_bin) == 64); /* Avoid false sharing */
|
||||
|
||||
|
||||
struct alignas(64) counter {
|
||||
/* =================================================== */
|
||||
struct atomic_i64 v; /* 8 bytes */
|
||||
struct counter *next_free; /* 8 bytes */
|
||||
/* =================================================== */
|
||||
u8 _pad[48]; /* 56 bytes (padding) */
|
||||
};
|
||||
STATIC_ASSERT(sizeof(struct counter) == 64); /* Padding validation (increase if necessary) */
|
||||
STATIC_ASSERT(alignof(struct counter) == 64); /* Avoid false sharing */
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@ -176,7 +158,7 @@ struct yield_param {
|
||||
enum yield_kind kind;
|
||||
union {
|
||||
struct {
|
||||
void *addr;
|
||||
volatile void *addr;
|
||||
void *cmp;
|
||||
u32 size;
|
||||
i64 timeout_ns;
|
||||
@ -192,22 +174,24 @@ struct alignas(64) fiber {
|
||||
/* ==================================================== */
|
||||
i16 id; /* 02 bytes */
|
||||
i16 parent_id; /* 02 bytes */
|
||||
i16 next_waiter; /* 02 bytes */
|
||||
i16 prev_waiter; /* 02 bytes */
|
||||
u8 _pad0[4]; /* 04 bytes (padding) */
|
||||
/* ==================================================== */
|
||||
u64 wait_addr; /* 08 bytes */
|
||||
/* ==================================================== */
|
||||
u64 wait_time; /* 08 bytes */
|
||||
/* ==================================================== */
|
||||
i16 next_addr_waiter; /* 02 bytes */
|
||||
i16 prev_addr_waiter; /* 02 bytes */
|
||||
i16 next_time_waiter; /* 02 bytes */
|
||||
i16 prev_time_waiter; /* 02 bytes */
|
||||
/* ==================================================== */
|
||||
u8 _pad1[8]; /* 08 bytes (padding) */
|
||||
/* ==================================================== */
|
||||
u8 _pad2[8]; /* 08 bytes (padding) */
|
||||
/* ==================================================== */
|
||||
u8 _pad3[8]; /* 08 bytes (padding) */
|
||||
/* ==================================================== */
|
||||
u8 _pad4[8]; /* 08 bytes (padding) */
|
||||
/* ==================================================== */
|
||||
u8 _pad5[8]; /* 08 bytes (padding) */
|
||||
/* ==================================================== */
|
||||
|
||||
/* ==================================================== */
|
||||
/* =============== Cache line boundary ================ */
|
||||
/* ==================== Cache line ==================== */
|
||||
/* ==================================================== */
|
||||
|
||||
/* ==================================================== */
|
||||
@ -224,7 +208,7 @@ struct alignas(64) fiber {
|
||||
/* ==================================================== */
|
||||
struct yield_param *yield_param; /* 08 bytes */
|
||||
/* ==================================================== */
|
||||
u8 _pad6[8]; /* 08 bytes (padding) */
|
||||
u8 _pad3[8]; /* 08 bytes (padding) */
|
||||
|
||||
};
|
||||
STATIC_ASSERT(sizeof(struct fiber) == 128); /* Padding validation (increase if necessary) */
|
||||
@ -425,7 +409,7 @@ void sys_wake_all(void *addr)
|
||||
/* NOTE: Each array is conservatively sized as the number of all waiters in the list */
|
||||
queue_waiter_arrays[i] = arena_push_array_no_zero(scratch.arena, struct fiber *, num_waiters);
|
||||
}
|
||||
for (struct fiber *waiter = fiber_from_id(wait_list->first_waiter); waiter; waiter = fiber_from_id(waiter->next_waiter)) {
|
||||
for (struct fiber *waiter = fiber_from_id(wait_list->first_waiter); waiter; waiter = fiber_from_id(waiter->next_addr_waiter)) {
|
||||
enum job_queue_kind queue_kind = job_queue_kind_from_priority(waiter->job_priority);
|
||||
i32 index = queue_counts[queue_kind]++;
|
||||
struct fiber **array = queue_waiter_arrays[queue_kind];
|
||||
@ -583,6 +567,12 @@ INTERNAL struct fiber *fiber_alloc(enum fiber_kind kind)
|
||||
fiber->addr = ConvertThreadToFiber((void *)(i64)fiber_id);
|
||||
}
|
||||
}
|
||||
fiber->wait_addr = 0;
|
||||
fiber->wait_time = 0;
|
||||
fiber->prev_addr_waiter = 0;
|
||||
fiber->next_addr_waiter = 0;
|
||||
fiber->prev_time_waiter = 0;
|
||||
fiber->next_time_waiter = 0;
|
||||
fiber->job_func = 0;
|
||||
fiber->job_sig = 0;
|
||||
fiber->job_id = 0;
|
||||
@ -879,7 +869,7 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
|
||||
|
||||
case YIELD_KIND_WAIT:
|
||||
{
|
||||
void *wait_addr = yield.wait.addr;
|
||||
volatile void *wait_addr = yield.wait.addr;
|
||||
void *wait_cmp = yield.wait.cmp;
|
||||
u32 wait_size = yield.wait.size;
|
||||
|
||||
@ -888,7 +878,16 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
|
||||
|
||||
while (atomic_i32_fetch_test_set(&bin->lock, 0, 1) != 0) ix_pause();
|
||||
{
|
||||
if (MEMEQ(wait_addr, wait_cmp, wait_size)) {
|
||||
/* Load and compare values now that bin is locked */
|
||||
b32 cancel_wait;
|
||||
switch (wait_size) {
|
||||
case 1: cancel_wait = (u8)_InterlockedCompareExchange8(wait_addr, 0, 0) == *(u8 *)wait_cmp; break;
|
||||
case 2: cancel_wait = (u16)_InterlockedCompareExchange16(wait_addr, 0, 0) == *(u16 *)wait_cmp; break;
|
||||
case 4: cancel_wait = (u32)_InterlockedCompareExchange(wait_addr, 0, 0) == *(u32 *)wait_cmp; break;
|
||||
case 8: cancel_wait = (u64)_InterlockedCompareExchange64(wait_addr, 0, 0) == *(u64 *)wait_cmp; break;
|
||||
default: cancel_wait = true; ASSERT(false); break; /* Invalid wait size */
|
||||
}
|
||||
if (!cancel_wait) {
|
||||
/* Search addr wait list in bin */
|
||||
struct wait_list *wait_list = NULL;
|
||||
for (struct wait_list *tmp = bin->first_wait_list; tmp && !wait_list; tmp = tmp->next_in_bin) {
|
||||
@ -921,9 +920,10 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
|
||||
}
|
||||
|
||||
/* Insert fiber into wait list */
|
||||
job_fiber->wait_addr = (u64)wait_addr;
|
||||
if (wait_list->last_waiter) {
|
||||
fiber_from_id(wait_list->last_waiter)->next_waiter = job_fiber_id;
|
||||
job_fiber->prev_waiter = wait_list->last_waiter;
|
||||
fiber_from_id(wait_list->last_waiter)->next_addr_waiter = job_fiber_id;
|
||||
job_fiber->prev_addr_waiter = wait_list->last_waiter;
|
||||
} else {
|
||||
wait_list->first_waiter = job_fiber_id;
|
||||
}
|
||||
|
||||
Loading…
Reference in New Issue
Block a user