/* sheet.c */
#include "sheet.h"
|
|
#include "arena.h"
|
|
#include "log.h"
|
|
#include "sys.h"
|
|
#include "scratch.h"
|
|
#include "resource.h"
|
|
#include "asset_cache.h"
|
|
#include "ase.h"
|
|
#include "util.h"
|
|
#include "work.h"
|
|
#include "atomic.h"
|
|
#include "thread_local.h"
|
|
#include "app.h"
|
|
|
|
#define SHEET_ARENA_RESERVE MEGABYTE(64)
#define SHEET_LOOKUP_TABLE_BUCKET_RATIO 2.0

#define TCTX_ARENA_RESERVE MEGABYTE(64)
#define CACHE_BUCKETS_COUNT 1024

#define MAX_LOADER_THREADS 4

/* Size of cache memory until evictor starts evicting */
#define CACHE_MEMORY_BUDGET MEGABYTE(8)

/* How long between evictor thread scans (seconds).
 * NOTE(review): identifier ends in a lowercase 'l' (typo); the original
 * name is kept so existing uses still compile, with a correctly-spelled
 * alias below for new code. */
#define EVICTOR_CHECK_INTERVAl 0.500
#define EVICTOR_CHECK_INTERVAL EVICTOR_CHECK_INTERVAl

/* Time a cache entry spends unused before it's considered evictable (seconds) */
#define EVICTOR_GRACE_PERIOD 10.000
|
|
|
|
/* ========================== *
|
|
* Loader cmd structs
|
|
* ========================== */
|
|
|
|
/* A queued request for a loader thread to decode one sheet.
 * Owned by the loader queue in G; recycled through next_free. */
struct loader_cmd {
    struct loader_cmd *next;       /* FIFO link (G.first/last_loader_cmd) */
    struct loader_cmd *next_free;  /* Free-list link (G.first_free_loader_cmd) */

    struct cache_node *cache_node; /* Node to populate with the loaded sheet */
    struct sheet_tag tag;          /* Tag to load; tag.path points into tag_path_buff */
    u8 tag_path_buff[4096];        /* Stable copy of the path (caller's string may not outlive the cmd) */
};
|
|
|
|
/* ========================== *
|
|
* Cache structs
|
|
* ========================== */
|
|
|
|
/* Lifecycle of a cache node; stored in cache_node.state and advanced
 * atomically: NONE -> QUEUED -> WORKING -> LOADED. */
enum cache_node_state {
    CACHE_NODE_STATE_NONE,    /* Freshly allocated, no load claimed yet */
    CACHE_NODE_STATE_QUEUED,  /* Claimed for loading (queued, or about to load inline) */
    CACHE_NODE_STATE_WORKING, /* sheet_load() in progress */
    CACHE_NODE_STATE_LOADED   /* sheet pointer is valid for readers */
};
|
|
|
|
/* One cached sheet. Created by node_lookup_touch, populated by sheet_load,
 * and recycled by the evictor through the node pool free list. */
struct cache_node {
    u128 hash;                           /* Tag hash (FNV-128 of the resource path) */
    struct atomic_u32 state;             /* enum cache_node_state */
    struct atomic_i32 refcount; /* Number of scopes currently holding a reference to this sheet */
    struct atomic_u64 last_refcount0_ts; /* Last time that refcount reached 0 */

    /* Allocated data */
    u64 memory_usage;    /* arena.committed at load time; counted against the cache budget */
    struct arena arena;  /* Owns the sheet's memory; set read-only after load */
    struct sheet *sheet; /* Points into arena, or at g_sheet_nil when the resource is missing */

    /* Hash list (doubly linked chain within one cache bucket) */
    struct cache_node *next_hash;
    struct cache_node *prev_hash;

    /* Free list (node pool; guarded by cache.node_pool_mutex) */
    struct cache_node *next_free;

#if RESOURCE_RELOADING
    struct sys_datetime initial_resource_file_modified_time; /* File mtime at load; evictor compares against current */
    u64 tag_path_len;
    u8 tag_path[4096]; /* Copy of the tag path so the evictor can re-stat the file */
#endif
};
|
|
|
|
/* One hash bucket of the cache table. The rw mutex guards the node chain:
 * shared for lookup/scan, exclusive for insert/remove. */
struct cache_bucket {
    struct sys_rw_mutex rw_mutex;
    struct cache_node *first;
};
|
|
|
|
/* Global sheet cache: fixed bucket table plus a pool of recycled nodes.
 * memory_usage sums the committed arenas of loaded nodes and drives the
 * evictor's budget check. */
struct cache {
    struct atomic_u64 memory_usage;
    struct arena arena;           /* Backs buckets and cache nodes */
    struct cache_bucket *buckets; /* CACHE_BUCKETS_COUNT entries */
    struct sys_mutex node_pool_mutex; /* Guards node_pool_first_free */
    struct cache_node *node_pool_first_free;
};
|
|
|
|
/* Per-scope record that a scope holds a reference to a cache node.
 * Lives in the thread-local arena; recycled via next_free. */
struct sheet_scope_reference {
    struct cache_node *cache_node;
    struct sheet_scope_reference *next_hash; /* Chain within the scope's bucket */
    struct sheet_scope_reference *next_free; /* Thread-local free list */
};
|
|
|
|
/* ========================== *
|
|
* Global state
|
|
* ========================== */
|
|
|
|
/* Module-global state: the cache, the loader thread pool with its cmd
 * queue (mutex + condition variable), and the evictor thread. */
GLOBAL struct {
    /* Cache */
    struct cache cache;

    /* Loader threads */
    b32 loaders_shutdown;           /* Set under loaders_mutex at shutdown */
    struct sys_mutex loaders_mutex; /* Guards the cmd FIFO and free list */
    struct sys_condition_variable loaders_cv;
    struct arena loader_cmd_arena;
    struct loader_cmd *first_free_loader_cmd;
    struct loader_cmd *first_loader_cmd; /* FIFO head */
    struct loader_cmd *last_loader_cmd;  /* FIFO tail */
    u64 loader_threads_count;
    struct sys_thread loader_threads[MAX_LOADER_THREADS];

    /* Evictor thread */
    b32 evictor_shutdown; /* Set under evictor_mutex at shutdown */
    struct sys_mutex evictor_mutex;
    struct sys_condition_variable evictor_cv;

    struct sys_thread evictor_thread;
} G = { 0 }, DEBUG_ALIAS(G, G_sheet);
|
|
|
|
/* Sentinel sheets: g_sheet_nil stands in for missing resources,
 * g_sheet_loading is returned while an async load is still in flight. */
GLOBAL READONLY struct sheet g_sheet_nil = { 0 };
GLOBAL READONLY struct sheet g_sheet_loading = {
    .loading = true
};
|
|
|
|
|
|
/* ========================== *
|
|
* Thread local state
|
|
* ========================== */
|
|
|
|
/* Per-thread context: arena backing scopes and reference records, plus
 * free lists for recycling them without cross-thread contention. */
struct sheet_tctx {
    struct arena arena;
    struct sheet_scope *first_free_scope;
    struct sheet_scope_reference *first_free_reference;
};
|
|
|
|
INTERNAL THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(sheet_tctx_alloc, vtctx)
|
|
{
|
|
struct sheet_tctx *tctx = (struct sheet_tctx *)vtctx;
|
|
tctx->arena = arena_alloc(MEGABYTE(64));
|
|
}
|
|
|
|
INTERNAL THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(sheet_tctx_release, vtctx)
|
|
{
|
|
struct sheet_tctx *tctx = (struct sheet_tctx *)vtctx;
|
|
arena_release(&tctx->arena);
|
|
}
|
|
|
|
/* Per-thread sheet context (scope/reference free lists + arena). */
GLOBAL THREAD_LOCAL_VAR_DEF(tl_sheet_tctx, struct sheet_tctx, sheet_tctx_alloc, sheet_tctx_release);
|
|
|
|
/* ========================== *
|
|
* Startup
|
|
* ========================== */
|
|
|
|
/* Forward declarations: exit callback and background thread entry points. */
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sheet_shutdown);
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_loader_thread_entry_point, arg);
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg);
|
|
|
|
/* Module startup: allocate the cache (bucket table + node arena), the
 * loader cmd queue, spawn the loader threads and the evictor thread, and
 * register the shutdown callback. The receipt parameters only encode
 * startup ordering relative to the other modules; none are read here. */
struct sheet_startup_receipt sheet_startup(struct work_startup_receipt *work_sr,
                                           struct asset_cache_startup_receipt *asset_cache_sr,
                                           struct resource_startup_receipt *resource_sr)
{
    (UNUSED)work_sr;
    (UNUSED)asset_cache_sr;
    (UNUSED)resource_sr;

    /* Cache: one rw mutex per bucket so lookups in different buckets
     * never contend. */
    G.cache.node_pool_mutex = sys_mutex_alloc();
    G.cache.arena = arena_alloc(GIGABYTE(64));
    G.cache.buckets = arena_push_array_zero(&G.cache.arena, struct cache_bucket, CACHE_BUCKETS_COUNT);
    for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
        G.cache.buckets[i].rw_mutex = sys_rw_mutex_alloc();
    }

    /* Loader queue */
    G.loader_cmd_arena = arena_alloc(GIGABYTE(64));
    G.loaders_mutex = sys_mutex_alloc();
    G.loaders_cv = sys_condition_variable_alloc();

    /* Evictor synchronization (thread spawned below) */
    G.evictor_mutex = sys_mutex_alloc();
    G.evictor_cv = sys_condition_variable_alloc();

    {
        struct temp_arena scratch = scratch_begin_no_conflict();
        /* One loader per logical core minus one, clamped to
         * [1, MAX_LOADER_THREADS].
         * NOTE(review): assumes clamp_i64(min, max, value) argument order
         * -- confirm against its declaration. */
        G.loader_threads_count = clamp_i64(1, MAX_LOADER_THREADS, sys_num_logical_processors() - 1);
        for (u64 i = 0; i < G.loader_threads_count; ++i) {
            struct string thread_name = string_format(scratch.arena,
                                                      STR("[P0] Sheet loader thread %F"),
                                                      FMT_UINT(i));
            G.loader_threads[i] = sys_thread_alloc(sheet_loader_thread_entry_point, NULL, thread_name);
        }
        scratch_end(scratch);
    }
    G.evictor_thread = sys_thread_alloc(sheet_evictor_thread_entry_point, NULL, STR("[P0] Sheet evictor thread"));

    app_register_exit_callback(&sheet_shutdown);

    return (struct sheet_startup_receipt) { 0 };
}
|
|
|
|
/* App-exit callback: flag both thread groups for shutdown under their
 * respective mutexes, wake them via broadcast, then join all threads.
 * Flags are set before waiting so no thread can re-sleep past shutdown. */
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sheet_shutdown)
{
    __prof;

    /* Signal loaders shutdown */
    sys_mutex_lock(&G.loaders_mutex);
    {
        G.loaders_shutdown = true;
        sys_condition_variable_broadcast(&G.loaders_cv);
    }
    sys_mutex_unlock(&G.loaders_mutex);

    /* Signal evictor shutdown */
    sys_mutex_lock(&G.evictor_mutex);
    {
        G.evictor_shutdown = true;
        sys_condition_variable_broadcast(&G.evictor_cv);
    }
    sys_mutex_unlock(&G.evictor_mutex);

    /* Wait on threads */
    for (u64 i = 0; i < G.loader_threads_count; ++i) {
        sys_thread_wait_release(&G.loader_threads[i]);
    }
    sys_thread_wait_release(&G.evictor_thread);
}
|
|
|
|
/* ========================== *
|
|
* Tag
|
|
* ========================== */
|
|
|
|
struct sheet_tag sheet_tag_from_path(struct string path)
|
|
{
|
|
struct sheet_tag res = { 0 };
|
|
res.hash = HASH_FNV128_BASIS;
|
|
res.hash = hash_fnv128(res.hash, BUFFER_FROM_STRING(path));
|
|
res.path = path;
|
|
return res;
|
|
}
|
|
|
|
/* ========================== *
|
|
* Load
|
|
* ========================== */
|
|
|
|
/* Build a sheet in `arena` from a decoded .ase result: frame metadata is
 * scattered into an index-addressed array, and span names/ranges (if any)
 * are copied into a fixed-size lookup dict. Requires ase.num_frames >= 1;
 * assumes every ase_frame->index is < num_frames (TODO confirm decoder
 * guarantees this). */
INTERNAL struct sheet init_sheet_from_ase_result(struct arena *arena, struct ase_decode_sheet_result ase)
{
    struct sheet sheet = { 0 };

    ASSERT(ase.num_frames >= 1);

    /* Init frames: ase frames arrive as a linked list; place each into the
     * array slot given by its own index. */
    sheet.image_size = ase.image_size;
    sheet.frame_size = ase.frame_size;
    sheet.frames = arena_push_array_zero(arena, struct sheet_frame, ase.num_frames);
    sheet.frames_count = ase.num_frames;
    for (struct ase_frame *ase_frame = ase.frame_head; ase_frame; ase_frame = ase_frame->next) {
        u32 index = ase_frame->index;
        sheet.frames[index] = (struct sheet_frame) {
            .index = index,
            .duration = ase_frame->duration,
            .clip = ase_frame->clip
        };
    }

    /* Init spans: dict is sized to num_spans * SHEET_LOOKUP_TABLE_BUCKET_RATIO
     * buckets to keep chains short. Names are copied into `arena` because
     * the decoded ase data is scratch-backed in the caller (see sheet_load). */
    sheet.spans_count = ase.num_spans;
    if (ase.num_spans > 0) {
        sheet.spans_dict = fixed_dict_init(arena, (u64)(ase.num_spans * SHEET_LOOKUP_TABLE_BUCKET_RATIO));
        for (struct ase_span *ase_span = ase.span_head; ase_span; ase_span = ase_span->next) {
            struct string name = string_copy(arena, ase_span->name);
            struct sheet_span *span = arena_push(arena, struct sheet_span);
            *span = (struct sheet_span) {
                .name = name,
                .start = ase_span->start,
                .end = ase_span->end
            };
            fixed_dict_set(arena, &sheet.spans_dict, name, span);
        }
    }

    return sheet;
}
|
|
|
|
/* Decode the .ase resource for `tag` into cache node `n`, transitioning
 * n->state WORKING -> LOADED. Allocates n->arena, builds the sheet in it,
 * freezes it read-only, and accounts its committed size against the cache
 * budget. A missing resource logs an error and publishes g_sheet_nil (the
 * arena is still allocated and accounted, just near-empty). Runs either on
 * a loader thread or inline on the caller's thread (await path). */
INTERNAL void sheet_load(struct cache_node *n, struct sheet_tag tag)
{
    __prof;
    struct temp_arena scratch = scratch_begin_no_conflict();

    /* Mark in-progress; readers observing WORKING get g_sheet_loading. */
    atomic_u32_eval_exchange(&n->state, CACHE_NODE_STATE_WORKING);
    struct string path = tag.path;

    logf_info("Loading sheet \"%F\"", FMT_STR(path));
    sys_timestamp_t start_ts = sys_timestamp();

    ASSERT(string_ends_with(path, STR(".ase")));

    n->arena = arena_alloc(SHEET_ARENA_RESERVE);
    {
        /* Decode */
        struct ase_decode_sheet_result decoded = { 0 };
        if (resource_exists(path)) {
            struct resource sheet_rs = resource_open(path);
            /* Decode into scratch; the final sheet is rebuilt in n->arena below. */
            decoded = ase_decode_sheet(scratch.arena, sheet_rs.bytes);
#if RESOURCE_RELOADING
            /* Remember file mtime + path so the evictor can detect on-disk
             * changes. Paths longer than the fixed buffer are truncated. */
            n->initial_resource_file_modified_time = sys_file_get_time(sheet_rs.file).modified;
            u64 cpy_len = min_u64(tag.path.len, ARRAY_COUNT(n->tag_path));
            n->tag_path_len = cpy_len;
            MEMCPY(n->tag_path, tag.path.text, cpy_len);
#endif
            resource_close(sheet_rs);

            /* Initialize */
            n->sheet = arena_push(&n->arena, struct sheet);
            *n->sheet = init_sheet_from_ase_result(&n->arena, decoded);
        } else {
            /* NOTE(review): g_sheet_nil is declared READONLY; assumes
             * READONLY does not const-qualify the object -- confirm. */
            n->sheet = &g_sheet_nil;
            logf_error("Resource \"%F\" not found", path);
        }
    }
    /* Freeze the node's memory and account it against the cache budget. */
    arena_set_readonly(&n->arena);
    n->memory_usage = n->arena.committed;
    atomic_u64_eval_add(&G.cache.memory_usage, n->memory_usage);

    logf_info("Finished loading sheet \"%F\" in %F seconds (final size: %F bytes).",
              FMT_STR(path),
              FMT_FLOAT(sys_timestamp_seconds(sys_timestamp() - start_ts)),
              FMT_UINT(n->arena.pos));

    /* Publish: readers may now use n->sheet. */
    atomic_u32_eval_exchange(&n->state, CACHE_NODE_STATE_LOADED);

    scratch_end(scratch);
}
|
|
|
|
/* ========================== *
|
|
* Scope
|
|
* ========================== */
|
|
|
|
/* Ensure `scope` holds a reference to `cache_node`, bumping the node's
 * refcount exactly once per scope. References are kept in per-scope
 * buckets parallel to the cache buckets (same index), so the duplicate
 * check only walks one short chain. Reference records come from the
 * thread-local free list, or the thread-local arena when empty. */
INTERNAL void scope_ensure_reference(struct sheet_scope *scope, struct cache_node *cache_node, u64 cache_bucket_index)
{
    __prof;
    struct sheet_scope_reference **ref_next = &scope->reference_buckets[cache_bucket_index];
    struct sheet_scope_reference *ref = *ref_next;
    while (ref) {
        if (ref->cache_node == cache_node) {
            /* Scope already references node */
            break;
        } else {
            ref_next = &ref->next_hash;
            ref = *ref_next;
        }
    }

    if (!ref) {
        /* New reference: bump the node's refcount and append a record
         * (ref_next points at the chain's tail link after the walk). */
        struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx);
        atomic_i32_inc_eval(&cache_node->refcount);
        if (tctx->first_free_reference) {
            ref = tctx->first_free_reference;
            tctx->first_free_reference = ref->next_free;
            MEMZERO_STRUCT(ref);
        } else {
            ref = arena_push_zero(&tctx->arena, struct sheet_scope_reference);
        }
        ref->cache_node = cache_node;
        *ref_next = ref;
    }
}
|
|
|
|
/* Open a sheet reference scope on the calling thread. Scopes are recycled
 * from the thread-local free list (the bucket array is zeroed and kept on
 * reuse); fresh scopes allocate their bucket array from the thread-local
 * arena. Intended to be closed with sheet_scope_end -- free lists are
 * thread-local, so begin/end should happen on the same thread. */
struct sheet_scope *sheet_scope_begin(void)
{
    struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx);

    struct sheet_scope *res = NULL;
    if (tctx->first_free_scope) {
        res = tctx->first_free_scope;
        tctx->first_free_scope = res->next_free;
        /* Clear the bucket array, then reset the scope while preserving
         * the pointer to that (reused) array. */
        MEMZERO(res->reference_buckets, sizeof(*res->reference_buckets) * CACHE_BUCKETS_COUNT);
        *res = (struct sheet_scope) {
            .reference_buckets = res->reference_buckets
        };
    } else {
        res = arena_push_zero(&tctx->arena, struct sheet_scope);
        res->reference_buckets = arena_push_array_zero(&tctx->arena, struct sheet_scope_reference *, CACHE_BUCKETS_COUNT);
    }

    return res;
}
|
|
|
|
/* Close a scope: drop one refcount on every referenced cache node,
 * timestamping nodes whose refcount hits 0 (feeds the evictor's grace
 * period), then recycle all reference records and the scope itself onto
 * the thread-local free lists. */
void sheet_scope_end(struct sheet_scope *scope)
{
    struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx);
    sys_timestamp_t cur_timestamp = sys_timestamp();
    for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
        struct sheet_scope_reference *ref = scope->reference_buckets[i];
        while (ref) {
            if (atomic_i32_dec_eval(&ref->cache_node->refcount) == 0) {
                /* Refcount is now 0, mark timestamp on cache node */
                atomic_u64_eval_exchange(&ref->cache_node->last_refcount0_ts, cur_timestamp);
            }
            /* Recycle the record; next_hash stays valid until read. */
            ref->next_free = tctx->first_free_reference;
            tctx->first_free_reference = ref;
            ref = ref->next_hash;
        }
    }
    scope->next_free = tctx->first_free_scope;
    tctx->first_free_scope = scope;
}
|
|
|
|
/* ========================== *
|
|
* Cache interface
|
|
* ========================== */
|
|
|
|
INTERNAL struct cache_node *node_lookup_touch(struct sheet_scope *scope, struct sheet_tag tag)
|
|
{
|
|
__prof;
|
|
|
|
struct cache_node *n = NULL;
|
|
struct cache_node *nonmatching = NULL;
|
|
struct cache_node **nonmatching_next = NULL;
|
|
|
|
u64 cache_bucket_index = tag.hash % CACHE_BUCKETS_COUNT;
|
|
struct cache_bucket *bucket = &G.cache.buckets[cache_bucket_index];
|
|
|
|
/* Lookup */
|
|
/* TODO: Spinlock */
|
|
sys_rw_mutex_lock_shared(&bucket->rw_mutex);
|
|
{
|
|
nonmatching_next = &bucket->first;
|
|
n = *nonmatching_next;
|
|
while (n) {
|
|
if (n->hash == tag.hash) {
|
|
scope_ensure_reference(scope, n, cache_bucket_index);
|
|
break;
|
|
} else {
|
|
nonmatching = n;
|
|
nonmatching_next = &nonmatching->next_hash;
|
|
n = *nonmatching_next;
|
|
}
|
|
}
|
|
}
|
|
sys_rw_mutex_unlock_shared(&bucket->rw_mutex);
|
|
|
|
/* Allocate new node if necessary */
|
|
if (!n) {
|
|
__profscope(node_lookup_allocate);
|
|
sys_rw_mutex_lock_exclusive(&bucket->rw_mutex);
|
|
{
|
|
/* Alloc node */
|
|
sys_mutex_lock(&G.cache.node_pool_mutex);
|
|
{
|
|
if (G.cache.node_pool_first_free) {
|
|
n = G.cache.node_pool_first_free;
|
|
G.cache.node_pool_first_free = n->next_free;
|
|
MEMZERO_STRUCT(n);
|
|
} else {
|
|
n = arena_push_zero(&G.cache.arena, struct cache_node);
|
|
}
|
|
}
|
|
sys_mutex_unlock(&G.cache.node_pool_mutex);
|
|
/* Init node and add to bucket */
|
|
scope_ensure_reference(scope, n, cache_bucket_index);
|
|
*nonmatching_next = n;
|
|
if (nonmatching) {
|
|
nonmatching->next_hash = n;
|
|
n->prev_hash = nonmatching;
|
|
}
|
|
n->hash = tag.hash;
|
|
}
|
|
sys_rw_mutex_unlock_exclusive(&bucket->rw_mutex);
|
|
}
|
|
|
|
return n;
|
|
}
|
|
|
|
/* Resolve tag -> sheet through the cache. Returns the loaded sheet, or
 * g_sheet_loading while a load is pending. New nodes are claimed with a
 * CAS on state; the winner either loads synchronously (`await`) or
 * enqueues a loader cmd.
 * Fixes: (1) cmd->next is now explicitly cleared -- arena_push does not
 * zero and free-list cmds carry a stale link, so a garbage `next` could
 * corrupt the loader FIFO; (2) cmd->tag.path.len is clamped to the number
 * of bytes actually copied into tag_path_buff. */
INTERNAL struct sheet *sheet_from_tag_internal(struct sheet_scope *scope, struct sheet_tag tag, b32 await)
{
    struct sheet *res = &g_sheet_loading;
    struct cache_node *n = node_lookup_touch(scope, tag);

    u32 state = atomic_u32_eval(&n->state);
    if (state == CACHE_NODE_STATE_LOADED) {
        res = n->sheet;
    } else if (state == CACHE_NODE_STATE_NONE) {
        /* Race to claim the node; only the CAS winner loads/enqueues. */
        if (atomic_u32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUED) == CACHE_NODE_STATE_NONE) {
            if (await) {
                /* Load inline on this thread */
                sheet_load(n, tag);
                res = n->sheet;
            } else {
                sys_mutex_lock(&G.loaders_mutex);
                {
                    /* Allocate cmd (free list first, else cmd arena) */
                    struct loader_cmd *cmd = NULL;
                    if (G.first_free_loader_cmd) {
                        cmd = G.first_free_loader_cmd;
                        G.first_free_loader_cmd = cmd->next_free;
                    } else {
                        cmd = arena_push(&G.loader_cmd_arena, struct loader_cmd);
                    }

                    /* Initialize cmd. next must be cleared: neither the
                     * free list nor arena_push guarantees a zeroed link. */
                    cmd->next = NULL;
                    cmd->cache_node = n;
                    cmd->tag = tag;
                    {
                        /* Copy the path into the cmd-owned buffer (the
                         * caller's string may not outlive this call) and
                         * record the possibly-truncated length. */
                        u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
                        cmd->tag.path.text = cmd->tag_path_buff;
                        cmd->tag.path.len = copy_len;
                        MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
                    }

                    /* Add cmd to FIFO queue */
                    *(G.last_loader_cmd ? &G.last_loader_cmd->next : &G.first_loader_cmd) = cmd;
                    G.last_loader_cmd = cmd;

                    /* Signal work ready */
                    sys_condition_variable_signal(&G.loaders_cv);
                }
                sys_mutex_unlock(&G.loaders_mutex);
            }
        }
    }

    return res;
}
|
|
|
|
struct sheet *sheet_from_tag_await(struct sheet_scope *scope, struct sheet_tag tag)
|
|
{
|
|
__prof;
|
|
return sheet_from_tag_internal(scope, tag, true);
|
|
}
|
|
|
|
struct sheet *sheet_from_tag_async(struct sheet_scope *scope, struct sheet_tag tag)
|
|
{
|
|
__prof;
|
|
return sheet_from_tag_internal(scope, tag, false);
|
|
}
|
|
|
|
/* ========================== *
|
|
* Sheet data
|
|
* ========================== */
|
|
|
|
struct sheet_span sheet_get_span(struct sheet *sheet, struct string name)
|
|
{
|
|
struct sheet_span res = { 0 };
|
|
if (sheet->spans_count > 0) {
|
|
struct sheet_span *entry = fixed_dict_get(&sheet->spans_dict, name);
|
|
if (entry) {
|
|
res = *entry;
|
|
}
|
|
}
|
|
return res;
|
|
}
|
|
|
|
/* Fetch a frame by index, clamped to the last frame. A sheet with no
 * frames yields a placeholder frame (full clip, 0.1s duration). */
struct sheet_frame sheet_get_frame(struct sheet *sheet, u32 index)
{
    if (sheet->frames_count == 0) {
        return (struct sheet_frame) {
            .index = 0,
            .duration = 0.1,
            .clip = CLIP_ALL
        };
    }

    u32 clamped = min_u32(sheet->frames_count - 1, index);
    return sheet->frames[clamped];
}
|
|
|
|
/* ========================== *
|
|
* Loader thread
|
|
* ========================== */
|
|
|
|
/* Loader thread main loop: sleeps on loaders_cv until cmds are queued or
 * shutdown is flagged, then drains the FIFO, dropping the queue mutex
 * around each sheet_load so producers and other loaders can progress.
 * Spurious cv wakeups are harmless: the inner while re-checks the queue
 * and the outer loop re-evaluates shutdown. */
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_loader_thread_entry_point, arg)
{
    __prof;
    (UNUSED)arg;

    while (true) {
        sys_mutex_lock(&G.loaders_mutex);

        if (G.loaders_shutdown) {
            /* Thread shutdown */
            sys_mutex_unlock(&G.loaders_mutex);
            break;
        } else if (!G.first_loader_cmd) {
            /* Wait for work */
            sys_condition_variable_wait(&G.loaders_cv, &G.loaders_mutex);
        }

        while (G.first_loader_cmd && !G.loaders_shutdown) {
            /* Pull cmd from queue head; clear the tail if it emptied */
            struct loader_cmd *cmd = G.first_loader_cmd;
            G.first_loader_cmd = cmd->next;
            if (G.last_loader_cmd == cmd) {
                G.last_loader_cmd = NULL;
            }

            /* Do work (temporarily unlock) */
            sys_mutex_unlock(&G.loaders_mutex);
            {
                sheet_load(cmd->cache_node, cmd->tag);
            }
            sys_mutex_lock(&G.loaders_mutex);

            /* Free cmd (back onto the free list, under the mutex) */
            cmd->next_free = G.first_free_loader_cmd;
            G.first_free_loader_cmd = cmd;
        }

        sys_mutex_unlock(&G.loaders_mutex);
    }
}
|
|
|
|
/* ========================== *
|
|
* Evictor thread
|
|
* ========================== */
|
|
|
|
/* Scratch-allocated record of one eviction candidate found during a scan.
 * last_used_ts snapshots the node's last_refcount0_ts so the removal pass
 * can detect nodes re-referenced after the scan. */
struct evict_node {
    b32 force_evict;                   /* Backing file changed: evict regardless of budget */
    u64 last_used_ts;                  /* Snapshot of cache_node.last_refcount0_ts at scan time */
    struct cache_node *cache_node;
    struct cache_bucket *cache_bucket; /* Bucket owning cache_node */
    struct evict_node *next_unsorted;  /* Scan-order list */
    struct evict_node *next_sorted;    /* Oldest-first list built by insertion sort */
    struct evict_node *next_evicted;   /* Nodes actually unlinked from the table */
};
|
|
|
|
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg)
|
|
{
|
|
(UNUSED)arg;
|
|
|
|
while (true) {
|
|
struct temp_arena scratch = scratch_begin_no_conflict();
|
|
struct evict_node *head_unsorted = NULL;
|
|
struct evict_node *oldest_sorted = NULL;
|
|
struct evict_node *head_evicted = NULL;
|
|
|
|
b32 abort = false;
|
|
sys_mutex_lock(&G.evictor_mutex);
|
|
{
|
|
/* Thread shutdown */
|
|
if (G.evictor_shutdown) {
|
|
abort = true;
|
|
} else {
|
|
/* Wait */
|
|
sys_condition_variable_wait_time(&G.evictor_cv, &G.evictor_mutex, EVICTOR_CHECK_INTERVAl);
|
|
}
|
|
|
|
if (!G.evictor_shutdown) {
|
|
sys_timestamp_t cur_timestamp = sys_timestamp();
|
|
f64 cur_time = sys_timestamp_seconds(cur_timestamp);
|
|
|
|
b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
|
|
|
|
/* Scan for evictable nodes */
|
|
{
|
|
__profscope(eviction_scan);
|
|
|
|
for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
|
|
struct cache_bucket *bucket = &G.cache.buckets[i];
|
|
sys_rw_mutex_lock_shared(&bucket->rw_mutex);
|
|
{
|
|
struct cache_node *n = bucket->first;
|
|
while (n) {
|
|
b32 consider_for_eviction = false;
|
|
b32 force_evict = false;
|
|
if (atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) {
|
|
sys_timestamp_t last_used_ts = atomic_u64_eval(&n->last_refcount0_ts);
|
|
|
|
if (atomic_i32_eval(&n->refcount) <= 0) {
|
|
#if RESOURCE_RELOADING
|
|
/* Check if file changed for resource reloading */
|
|
if (!consider_for_eviction) {
|
|
struct string path = string_from_cstr_len((char *)n->tag_path, n->tag_path_len);
|
|
if (!sys_is_file(path)) {
|
|
consider_for_eviction = true;
|
|
} else {
|
|
struct sys_file file = sys_file_open_read(path);
|
|
struct sys_file_time ft = sys_file_get_time(file);
|
|
sys_file_close(file);
|
|
|
|
struct sys_datetime initial_file_time = n->initial_resource_file_modified_time;
|
|
struct sys_datetime current_file_time = ft.modified;
|
|
|
|
if (MEMCMP_STRUCT(&initial_file_time, ¤t_file_time) != 0) {
|
|
logf_info("Resource file for sheet \"%F\" has changed. Evicting to allow for reloading.", FMT_STR(path));
|
|
consider_for_eviction = true;
|
|
force_evict = true;
|
|
}
|
|
}
|
|
}
|
|
#endif
|
|
|
|
/* Check usage time */
|
|
if (!consider_for_eviction && cache_over_budget && (cur_time - sys_timestamp_seconds(last_used_ts) > EVICTOR_GRACE_PERIOD)) {
|
|
/* Cache is over budget and node hasn't been referenced in a while */
|
|
consider_for_eviction = true;
|
|
}
|
|
|
|
}
|
|
|
|
/* Add node to evict list */
|
|
if (consider_for_eviction) {
|
|
struct evict_node *evict_node = arena_push_zero(scratch.arena, struct evict_node);
|
|
evict_node->cache_node = n;
|
|
evict_node->cache_bucket = bucket;
|
|
evict_node->last_used_ts = last_used_ts;
|
|
evict_node->force_evict = force_evict;
|
|
evict_node->next_unsorted = head_unsorted;
|
|
head_unsorted = evict_node;
|
|
}
|
|
|
|
n = n->next_hash;
|
|
}
|
|
}
|
|
sys_rw_mutex_unlock_shared(&bucket->rw_mutex);
|
|
}
|
|
}
|
|
}
|
|
|
|
/* Sort evict nodes by usage time */
|
|
{
|
|
/* TODO: Optimize sort if necessary. Currently O(n^2). */
|
|
__profscope(eviction_sort);
|
|
for (struct evict_node *en = head_unsorted; en; en = en->next_unsorted) {
|
|
sys_timestamp_t ts = en->last_used_ts;
|
|
struct evict_node *prev = NULL;
|
|
struct evict_node *next = oldest_sorted;
|
|
while (next && !(ts <= next->last_used_ts || en->force_evict)) {
|
|
prev = next;
|
|
next = next->next_sorted;
|
|
}
|
|
if (prev) {
|
|
prev->next_sorted = en;
|
|
} else {
|
|
oldest_sorted = en;
|
|
}
|
|
en->next_sorted = next;
|
|
}
|
|
}
|
|
|
|
/* Remove evictable nodes from cache table until under budget */
|
|
{
|
|
__profscope(eviction_cache_removal);
|
|
for (struct evict_node *en = oldest_sorted; en; en = en->next_sorted) {
|
|
struct cache_bucket *bucket = en->cache_bucket;
|
|
struct cache_node *n = en->cache_node;
|
|
|
|
sys_rw_mutex_lock_exclusive(&bucket->rw_mutex);
|
|
{
|
|
if (*atomic_i32_raw(&n->refcount) > 0 || (!en->force_evict && (*atomic_u64_raw(&n->last_refcount0_ts) != en->last_used_ts))) {
|
|
/* Cache node has been referenced since scan, skip eviction. */
|
|
continue;
|
|
}
|
|
if (en->force_evict || atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET) {
|
|
/* Remove from cache table */
|
|
if (n->prev_hash) {
|
|
n->prev_hash->next_hash = n->next_hash;
|
|
} else {
|
|
bucket->first = n->next_hash;
|
|
}
|
|
if (n->next_hash) {
|
|
n->next_hash->prev_hash = n->prev_hash;
|
|
}
|
|
atomic_u64_eval_add(&G.cache.memory_usage, -((i64)n->memory_usage));
|
|
/* Add to evicted list */
|
|
en->next_evicted = head_evicted;
|
|
head_evicted = en;
|
|
} else {
|
|
/* Cache is no longer over budget or force evicting, stop iteration */
|
|
break;
|
|
}
|
|
}
|
|
sys_rw_mutex_unlock_exclusive(&bucket->rw_mutex);
|
|
}
|
|
}
|
|
|
|
if (head_evicted) {
|
|
/* Release evicted node memory */
|
|
{
|
|
__profscope(eviction_memory_release);
|
|
for (struct evict_node *en = head_evicted; en; en = en->next_evicted) {
|
|
struct cache_node *n = en->cache_node;
|
|
arena_release(&n->arena);
|
|
}
|
|
}
|
|
|
|
/* Add evicted nodes to free list */
|
|
sys_mutex_lock(&G.cache.node_pool_mutex);
|
|
{
|
|
__profscope(eviction_free_list_append);
|
|
for (struct evict_node *en = head_evicted; en; en = en->next_evicted) {
|
|
struct cache_node *n = en->cache_node;
|
|
n->next_free = G.cache.node_pool_first_free;
|
|
G.cache.node_pool_first_free = n;
|
|
}
|
|
}
|
|
sys_mutex_unlock(&G.cache.node_pool_mutex);
|
|
}
|
|
}
|
|
}
|
|
sys_mutex_unlock(&G.evictor_mutex);
|
|
scratch_end(scratch);
|
|
|
|
if (abort) {
|
|
break;
|
|
}
|
|
}
|
|
}
|