/*
 * Thread-local variable storage: global registration of per-var metadata,
 * plus lazy per-thread allocation out of a per-thread arena.
 */
#include "thread_local.h"
|
|
#include "sys.h"
|
|
#include "arena.h"
|
|
#include "atomic.h"
|
|
#include "intrinsics.h"
|
|
|
|
/* Address space reserved for each thread's local-variable arena
 * (presumably reserve-only, committed on demand by the arena — TODO confirm
 * against arena.h). */
#define THREAD_LOCAL_STORE_RESERVE (MEGABYTE(64))

/* Arbitrary. Increase if needed. */
#define MAX_THREAD_LOCAL_VARS 256
|
|
|
|
/*
 * Process-global registry of thread-local variable metadata.
 * metas_lock_flag guards metas_count and metas[]; take it via
 * metas_lock()/metas_unlock() before touching either.
 */
GLOBAL struct {
	struct atomic_i32 metas_lock_flag; /* spinlock flag: 0 = free, 1 = held */
	u64 metas_count;                   /* number of registered vars; never decreases */
	struct thread_local_var_meta metas[MAX_THREAD_LOCAL_VARS];
} G = ZI, DEBUG_ALIAS(G, G_thread_local);
|
|
|
|
/*
 * Acquire the global metas spinlock (busy-wait; held only for short
 * critical sections over G.metas/G.metas_count).
 *
 * NOTE(review): the loop condition assumes
 * atomic_i32_eval_compare_exchange returns a success indicator (non-zero
 * when the 0 -> 1 exchange succeeded). If it instead returns the
 * *previous* value — as the `eval_` naming elsewhere suggests — the
 * condition is inverted and a contended caller would fall through without
 * holding the lock. Confirm the return-value contract in atomic.h.
 */
INTERNAL void metas_lock(void)
{
	while (atomic_i32_eval_compare_exchange(&G.metas_lock_flag, 0, 1) == 0) {
		/* Spinlock */
		ix_pause();
	}
}
|
|
|
|
/*
 * Release the global metas spinlock by storing 0 back into the flag.
 * Must only be called by the thread that currently holds the lock.
 */
INTERNAL void metas_unlock(void)
{
	atomic_i32_eval_exchange(&G.metas_lock_flag, 0);
}
|
|
|
|
struct thread_local_store thread_local_store_alloc(void)
|
|
{
|
|
__prof;
|
|
struct thread_local_store t = ZI;
|
|
t.arena = arena_alloc(THREAD_LOCAL_STORE_RESERVE);
|
|
t.lookup = arena_push_array_zero(&t.arena, void *, MAX_THREAD_LOCAL_VARS);
|
|
t.allocation_order = arena_push_array_zero(&t.arena, u64, MAX_THREAD_LOCAL_VARS);
|
|
return t;
|
|
}
|
|
|
|
void thread_local_store_release(struct thread_local_store *t)
|
|
{
|
|
__prof;
|
|
/* Release allocated vars in reverse order */
|
|
metas_lock();
|
|
{
|
|
for (u64 i = t->allocation_order_count; i-- > 0;) {
|
|
u64 id = t->allocation_order[i];
|
|
void *data = t->lookup[id];
|
|
struct thread_local_var_meta *meta = &G.metas[id];
|
|
if (meta->release) {
|
|
meta->release(data);
|
|
}
|
|
}
|
|
}
|
|
metas_unlock();
|
|
|
|
arena_release(&t->arena);
|
|
}
|
|
|
|
/*
 * Resolve the address of this thread's instance of the thread-local
 * variable described by `meta`, registering the variable globally and
 * allocating the per-thread instance on first use.
 *
 * Returns a pointer into the calling thread's thread_local_store arena.
 */
void *_thread_local_var_eval(struct thread_local_var_meta *meta)
{
	/* Register var if unregistered */
	u64 id;
	{
		/* Double-checked registration: lock-free fast path when the var
		 * already has an id; take the metas lock only on first use.
		 * id is stored as id+1 so that 0 can mean "unregistered". */
		u64 id_plus_one = atomic_u64_eval(&meta->id_plus_one);
		if (id_plus_one == 0) {
			__profscope(_thread_local_var_eval__REGISTER);
			metas_lock();
			{
				id_plus_one = atomic_u64_eval(&meta->id_plus_one); /* Re-check now that locked */
				if (id_plus_one == 0) {
					id = G.metas_count++;
					if (id >= MAX_THREAD_LOCAL_VARS) {
						sys_panic(STR("Maximum number of thread local variables reached"));
					}
					/* NOTE(review): the id is published to other threads here,
					 * before G.metas[id] is filled in below. The only reader of
					 * G.metas (thread_local_store_release) takes the metas lock
					 * first, which presumably makes this safe — confirm no
					 * lock-free reader of G.metas exists elsewhere. */
					atomic_u64_eval_exchange(&meta->id_plus_one, id + 1);
					G.metas[id] = *meta;
				} else {
					id = id_plus_one - 1;
				}
			}
			metas_unlock();
		} else {
			id = id_plus_one - 1;
		}
	}

	/* Allocate var for thread if unallocated */
	struct thread_local_store *t = sys_thread_get_thread_local_store();
	void *data = t->lookup[id];
	if (!data) {
		__profscope(_thread_local_var_eval__ALLOC);

		/* Allocate */
		arena_align(&t->arena, meta->align);
		data = arena_push_array(&t->arena, u8, meta->size);
		/* Custom constructor if provided, otherwise zero-init. */
		if (meta->alloc) {
			meta->alloc(data);
		} else {
			MEMZERO(data, meta->size);
		}
		t->lookup[id] = data;
		/* Record for reverse-order release in thread_local_store_release. */
		t->allocation_order[t->allocation_order_count++] = id;
	}

	return data;
}
|