use ticket mutex for wait bins

This commit is contained in:
jacob 2025-07-10 14:08:41 -05:00
parent e3e0a98456
commit 63a8dc7595
2 changed files with 30 additions and 46 deletions

View File

@@ -7,7 +7,7 @@
#if PROFILING
#define PROFILING_SYSTEM_TRACE 1
#define PROFILING_SYSTEM_TRACE 0
#define PROFILING_CAPTURE_FRAME_IMAGE 0
#define PROFILING_LOCKS 0
#define PROFILING_D3D 1

View File

@@ -45,6 +45,11 @@
/* Arbitrary threshold for determining when to fall back from a looped WakeByAddressSingle to WakeByAddressAll */
#define WAKE_ALL_THRESHOLD 8
struct ticket_mutex {
struct atomic_i64_padded ticket;
struct atomic_i64_padded serving;
};
struct win32_thread {
sys_thread_func *entry_point;
void *thread_data;
@@ -106,37 +111,21 @@ struct win32_window {
#define NUM_WAIT_TIME_BINS 1024
struct alignas(64) wait_list {
/* =================================================== */
u64 value; /* 08 bytes */
/* =================================================== */
i16 first_waiter; /* 02 bytes */
i16 last_waiter; /* 02 bytes */
i32 num_waiters; /* 04 bytes */
/* =================================================== */
struct wait_list *next_in_bin; /* 08 bytes */
/* =================================================== */
struct wait_list *prev_in_bin; /* 08 bytes */
/* =================================================== */
u8 _pad0[32]; /* 32 bytes (padding) */
/* =================================================== */
u64 value;
i16 first_waiter;
i16 last_waiter;
i32 num_waiters;
struct wait_list *next_in_bin;
struct wait_list *prev_in_bin;
};
STATIC_ASSERT(sizeof(struct wait_list) == 64); /* Padding validation (increase if necessary) */
STATIC_ASSERT(alignof(struct wait_list) == 64); /* Avoid false sharing */
struct alignas(64) wait_bin {
/* =================================================== */
struct wait_list *first_wait_list; /* 08 bytes */
/* =================================================== */
struct wait_list *last_wait_list; /* 08 bytes */
/* =================================================== */
struct wait_list *first_free_wait_list; /* 08 bytes */
/* =================================================== */
struct atomic_i32 lock; /* 04 bytes */
u8 _pad0[4]; /* 04 bytes (padding) */
/* =================================================== */
u8 _pad1[32]; /* 32 bytes (padding) */
struct wait_list *first_wait_list;
struct wait_list *last_wait_list;
struct wait_list *first_free_wait_list;
struct ticket_mutex lock;
};
STATIC_ASSERT(sizeof(struct wait_bin) == 64); /* Padding validation (increase if necessary) */
STATIC_ASSERT(alignof(struct wait_bin) == 64); /* Avoid false sharing */
enum yield_kind {
@@ -234,11 +223,6 @@ enum job_queue_kind {
NUM_JOB_QUEUE_KINDS
};
struct ticket_mutex {
struct atomic_i64_padded ticket;
struct atomic_i64_padded serving;
};
struct alignas(64) job_queue {
enum job_queue_kind kind;
@@ -424,7 +408,7 @@ void sys_wake(void *addr, i32 count)
i32 num_waiters = 0;
struct fiber **waiters = 0;
{
while (atomic_i32_fetch_test_set(&wait_addr_bin->lock, 0, 1) != 0) ix_pause();
tm_lock(&wait_addr_bin->lock);
{
/* Search for wait addr list */
for (struct wait_list *tmp = wait_addr_bin->first_wait_list; tmp && !wait_addr_list; tmp = tmp->next_in_bin) {
@@ -444,7 +428,7 @@ void sys_wake(void *addr, i32 count)
}
}
}
atomic_i32_fetch_set(&wait_addr_bin->lock, 0);
tm_unlock(&wait_addr_bin->lock);
}
for (i32 i = 0; i < num_waiters; ++i) {
@@ -453,7 +437,7 @@ void sys_wake(void *addr, i32 count)
u64 wait_time_bin_index = wait_time % NUM_WAIT_TIME_BINS;
struct wait_bin *wait_time_bin = &G.wait_time_bins[wait_time_bin_index];
if (wait_time != 0) while (atomic_i32_fetch_test_set(&wait_time_bin->lock, 0, 1) != 0) ix_pause();
if (wait_time != 0) tm_lock(&wait_time_bin->lock);
{
/* Search for wait time list */
struct wait_list *wait_time_list = 0;
@@ -537,7 +521,7 @@ void sys_wake(void *addr, i32 count)
waiter->next_time_waiter = 0;
}
}
if (wait_time != 0) atomic_i32_fetch_set(&wait_time_bin->lock, 0);
if (wait_time != 0) tm_unlock(&wait_time_bin->lock);
}
/* Resume waiters */
@@ -979,9 +963,9 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
struct wait_bin *wait_addr_bin = &G.wait_addr_bins[wait_addr_bin_index];
struct wait_bin *wait_time_bin = &G.wait_time_bins[wait_time_bin_index];
if (wait_addr != 0) while (atomic_i32_fetch_test_set(&wait_addr_bin->lock, 0, 1) != 0) ix_pause();
if (wait_addr != 0) tm_lock(&wait_addr_bin->lock);
{
if (wait_time != 0) while (atomic_i32_fetch_test_set(&wait_time_bin->lock, 0, 1) != 0) ix_pause();
if (wait_time != 0) tm_lock(&wait_time_bin->lock);
{
b32 cancel_wait = wait_addr == 0 && wait_time == 0;
if (wait_addr != 0) {
@@ -1085,9 +1069,9 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
done = 1;
}
}
if (wait_time != 0) atomic_i32_fetch_set(&wait_time_bin->lock, 0);
if (wait_time != 0) tm_unlock(&wait_time_bin->lock);
}
if (wait_addr != 0) atomic_i32_fetch_set(&wait_addr_bin->lock, 0);
if (wait_addr != 0) tm_unlock(&wait_addr_bin->lock);
} break;
case YIELD_KIND_DONE:
@@ -1193,7 +1177,7 @@ INTERNAL SYS_THREAD_DEF(job_scheduler_entry, _)
i32 num_waiters = 0;
struct fiber **waiters = 0;
{
while (atomic_i32_fetch_test_set(&wait_time_bin->lock, 0, 1) != 0) ix_pause();
tm_lock(&wait_time_bin->lock);
{
/* Search for wait time list */
for (struct wait_list *tmp = wait_time_bin->first_wait_list; tmp && !wait_time_list; tmp = tmp->next_in_bin) {
@@ -1214,7 +1198,7 @@ INTERNAL SYS_THREAD_DEF(job_scheduler_entry, _)
}
}
}
atomic_i32_fetch_set(&wait_time_bin->lock, 0);
tm_unlock(&wait_time_bin->lock);
}
/* Update wait lists */
@@ -1224,7 +1208,7 @@ INTERNAL SYS_THREAD_DEF(job_scheduler_entry, _)
u64 wait_addr_bin_index = wait_addr % NUM_WAIT_ADDR_BINS;
struct wait_bin *wait_addr_bin = &G.wait_addr_bins[wait_addr_bin_index];
if (wait_addr != 0) while (atomic_i32_fetch_test_set(&wait_addr_bin->lock, 0, 1) != 0) ix_pause();
if (wait_addr != 0) tm_lock(&wait_addr_bin->lock);
{
/* Search for wait addr list */
struct wait_list *wait_addr_list = 0;
@@ -1236,7 +1220,7 @@ INTERNAL SYS_THREAD_DEF(job_scheduler_entry, _)
}
}
while (atomic_i32_fetch_test_set(&wait_time_bin->lock, 0, 1) != 0) ix_pause();
tm_lock(&wait_time_bin->lock);
{
/* Remove from addr list */
if (wait_addr_list) {
@@ -1311,9 +1295,9 @@ INTERNAL SYS_THREAD_DEF(job_scheduler_entry, _)
waiter->next_time_waiter = 0;
}
}
atomic_i32_fetch_set(&wait_time_bin->lock, 0);
tm_unlock(&wait_time_bin->lock);
}
if (wait_addr != 0) atomic_i32_fetch_set(&wait_addr_bin->lock, 0);
if (wait_addr != 0) tm_unlock(&wait_addr_bin->lock);
}
/* Resume waiters */