separate fiber free lists by job pool

This commit is contained in:
jacob 2025-07-11 12:48:35 -05:00
parent e13aca535a
commit 047f5c8359

View File

@@ -240,6 +240,9 @@ struct alignas(64) job_pool {
/* Jobs */
struct job_queue job_queues[NUM_SYS_PRIORITIES];
struct ticket_mutex free_fibers_lock;
i16 first_free_fiber_id;
/* Workers */
struct atomic32_padded workers_shutdown;
struct atomic64_padded num_jobs_in_queue;
@@ -304,10 +307,9 @@ GLOBAL struct {
struct atomic64_padded current_scheduler_cycle_period_ns;
/* Fibers */
i16 num_fibers;
i16 first_free_fiber_id;
struct arena *fiber_names_arena;
struct ticket_mutex fibers_lock;
i16 num_fibers;
struct arena *fiber_names_arena;
struct fiber fibers[SYS_MAX_FIBERS];
/* Wait lists */
@@ -608,7 +610,7 @@ INTERNAL void wake_address(void *addr, i32 count)
}
}
/* Lock & build fibers array */
/* Lock fibers & build array */
if (wait_addr_list) {
fibers = arena_push_array_no_zero(scratch.arena, struct fiber *, wait_addr_list->num_waiters);
for (struct fiber *fiber = fiber_from_id(wait_addr_list->first_waiter); fiber && num_fibers < count; fiber = fiber_from_id(fiber->next_addr_waiter)) {
@@ -622,7 +624,9 @@ INTERNAL void wake_address(void *addr, i32 count)
tm_unlock(&wait_addr_bin->lock);
}
wake_fibers_locked(num_fibers, fibers);
if (num_fibers > 0) {
wake_fibers_locked(num_fibers, fibers);
}
/* Wake win32 blocking thread waiters */
if (count >= WAKE_ALL_THRESHOLD) {
@@ -686,35 +690,39 @@ void sys_wake(void *addr, i32 count)
* Fibers
* ========================== */
enum fiber_kind {
FIBER_KIND_CONVERTED_THREAD,
FIBER_KIND_JOB_WORKER
};
INTERNAL void job_fiber_entry(void *id_ptr);
INTERNAL struct fiber *fiber_alloc(enum fiber_kind kind)
/* If `pool` is 0, then the currently running thread will be converted into a fiber */
INTERNAL struct fiber *fiber_alloc(struct job_pool *pool)
{
i16 fiber_id = 0;
struct fiber *fiber = 0;
char *new_name_cstr = 0;
{
tm_lock(&G.fibers_lock);
{
fiber_id = G.first_free_fiber_id;
if (fiber_id && kind == FIBER_KIND_JOB_WORKER) {
if (pool != 0) {
tm_lock(&pool->free_fibers_lock);
if (pool->first_free_fiber_id) {
fiber_id = pool->first_free_fiber_id;
fiber = &G.fibers[fiber_id];
G.first_free_fiber_id = fiber->parent_id;
} else {
fiber_id = G.num_fibers++;
if (fiber_id >= SYS_MAX_FIBERS) {
sys_panic(LIT("Max fibers reached"));
}
fiber = &G.fibers[fiber_id];
new_name_cstr = arena_push_array(G.fiber_names_arena, char, FIBER_NAME_MAX_SIZE);
pool->first_free_fiber_id = fiber->parent_id;
}
tm_unlock(&pool->free_fibers_lock);
}
tm_unlock(&G.fibers_lock);
if (!fiber_id) {
tm_lock(&G.fibers_lock);
{
{
fiber_id = G.num_fibers++;
if (fiber_id >= SYS_MAX_FIBERS) {
sys_panic(LIT("Max fibers reached"));
}
fiber = &G.fibers[fiber_id];
new_name_cstr = arena_push_array(G.fiber_names_arena, char, FIBER_NAME_MAX_SIZE);
}
}
tm_unlock(&G.fibers_lock);
}
}
if (new_name_cstr != 0) {
fiber->id = fiber_id;
@@ -752,10 +760,11 @@ INTERNAL struct fiber *fiber_alloc(enum fiber_kind kind)
fiber->name_cstr = new_name_cstr;
/* Init win32 fiber */
if (kind == FIBER_KIND_JOB_WORKER) {
if (pool != 0) {
fiber->addr = CreateFiber(FIBER_STACK_SIZE, job_fiber_entry, (void *)(i64)fiber_id);
fiber->can_yield = 1;
} else {
/* Fiber is not a part of a job pool, convert thread to fiber */
fiber->addr = ConvertThreadToFiber((void *)(i64)fiber_id);
fiber->can_yield = 0;
}
@@ -778,14 +787,14 @@ INTERNAL struct fiber *fiber_alloc(enum fiber_kind kind)
return fiber;
}
INTERNAL void fiber_release(struct fiber *fiber, i16 fiber_id)
INTERNAL void fiber_release(struct job_pool *pool, struct fiber *fiber, i16 fiber_id)
{
tm_lock(&G.fibers_lock);
tm_lock(&pool->free_fibers_lock);
{
fiber->parent_id = G.first_free_fiber_id;
G.first_free_fiber_id = fiber_id;
fiber->parent_id = pool->first_free_fiber_id;
pool->first_free_fiber_id = fiber_id;
}
tm_unlock(&G.fibers_lock);
tm_unlock(&pool->free_fibers_lock);
}
FORCE_INLINE struct fiber *fiber_from_id(i16 id)
@@ -1007,7 +1016,7 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
/* Use resumed fiber if present */
if (job_fiber_id > 0) {
if (job_fiber) {
fiber_release(job_fiber, job_fiber->id);
fiber_release(pool, job_fiber, job_fiber->id);
}
job_fiber = fiber_from_id(job_fiber_id);
}
@@ -1015,7 +1024,7 @@ INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
/* Run fiber */
if (job_func) {
if (!job_fiber) {
job_fiber = fiber_alloc(FIBER_KIND_JOB_WORKER);
job_fiber = fiber_alloc(pool);
}
job_fiber_id = job_fiber->id;
{
@@ -2881,7 +2890,7 @@ INTERNAL void win32_thread_release(struct win32_thread *t)
INTERNAL DWORD WINAPI win32_thread_proc(LPVOID vt)
{
fiber_alloc(FIBER_KIND_CONVERTED_THREAD);
fiber_alloc(0);
struct win32_thread *t = (struct win32_thread *)vt;
__profthread(t->thread_name_cstr, t->profiler_group);
@@ -3235,7 +3244,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
G.wait_lists_arena = arena_alloc(GIBI(64));
/* Convert main thread to fiber */
fiber_alloc(FIBER_KIND_CONVERTED_THREAD);
fiber_alloc(0);
/* Init job pools */
for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(G.job_pools); ++pool_kind) {