Use the sheet evictor's cycle counter, packed alongside the reference count, to track when a cache node was last used

This commit is contained in:
jacob 2024-04-30 15:36:25 -05:00
parent f414cd874b
commit 38d9bde9e6

View File

@ -11,6 +11,7 @@
#include "atomic.h" #include "atomic.h"
#include "thread_local.h" #include "thread_local.h"
#include "app.h" #include "app.h"
#include "intrinsics.h"
#define SHEET_ARENA_RESERVE MEGABYTE(64) #define SHEET_ARENA_RESERVE MEGABYTE(64)
#define SHEET_LOOKUP_TABLE_BUCKET_RATIO 2.0 #define SHEET_LOOKUP_TABLE_BUCKET_RATIO 2.0
@ -24,9 +25,9 @@
#define CACHE_MEMORY_BUDGET MEGABYTE(8) #define CACHE_MEMORY_BUDGET MEGABYTE(8)
/* How long between evictor thread scans */ /* How long between evictor thread scans */
#define EVICTOR_CHECK_INTERVAl 0.500 #define EVICTOR_CYCLE_INTERVAL 0.500
/* Time a cache entry spends unused it's considered evictable */ /* Time a cache entry spends unused until it's considered evictable (at granularity of EVICTOR_CYCLE_INTERVAL) */
#define EVICTOR_GRACE_PERIOD 10.000 #define EVICTOR_GRACE_PERIOD 10.000
/* ========================== * /* ========================== *
@ -53,11 +54,16 @@ enum cache_node_state {
CACHE_NODE_STATE_LOADED CACHE_NODE_STATE_LOADED
}; };
struct cache_node_refcount {
i32 count; /* Number of scopes currently holding a reference to this sheet */
u32 last_modified_cycle; /* Last time that refcount was modified */
};
CT_ASSERT(sizeof(struct cache_node_refcount) == 8); /* Must fit into 64 bit atomic */
struct cache_node { struct cache_node {
u128 hash; u128 hash;
struct atomic_u32 state; struct atomic_u32 state;
struct atomic_i32 refcount; /* Number of scopes currently holding a reference to this sheet */ struct atomic_u64 refcount_struct; /* Cast eval to `cache_node_refcount` */
struct atomic_u64 last_refcount0_ts; /* Last time that refcount reached 0 */
/* Allocated data */ /* Allocated data */
u64 memory_usage; u64 memory_usage;
@ -117,6 +123,7 @@ GLOBAL struct {
struct sys_thread loader_threads[MAX_LOADER_THREADS]; struct sys_thread loader_threads[MAX_LOADER_THREADS];
/* Evictor thread */ /* Evictor thread */
struct atomic_u32 evictor_cycle;
b32 evictor_shutdown; b32 evictor_shutdown;
struct sys_mutex evictor_mutex; struct sys_mutex evictor_mutex;
struct sys_condition_variable evictor_cv; struct sys_condition_variable evictor_cv;
@ -189,7 +196,7 @@ struct sheet_startup_receipt sheet_startup(struct work_startup_receipt *work_sr,
G.loader_threads_count = clamp_i64(1, MAX_LOADER_THREADS, sys_num_logical_processors() - 1); G.loader_threads_count = clamp_i64(1, MAX_LOADER_THREADS, sys_num_logical_processors() - 1);
for (u64 i = 0; i < G.loader_threads_count; ++i) { for (u64 i = 0; i < G.loader_threads_count; ++i) {
struct string thread_name = string_format(scratch.arena, struct string thread_name = string_format(scratch.arena,
STR("[P0] Sheet loader thread %F"), STR("[P0] Sheet loader %F"),
FMT_UINT(i)); FMT_UINT(i));
G.loader_threads[i] = sys_thread_alloc(sheet_loader_thread_entry_point, NULL, thread_name); G.loader_threads[i] = sys_thread_alloc(sheet_loader_thread_entry_point, NULL, thread_name);
} }
@ -357,8 +364,22 @@ INTERNAL void scope_ensure_reference(struct sheet_scope *scope, struct cache_nod
} }
if (!ref) { if (!ref) {
/* Increment refcount */
u32 evictor_cycle = atomic_u32_eval(&G.evictor_cycle);
u64 old_refcount_uncast = atomic_u64_eval(&cache_node->refcount_struct);
do {
struct cache_node_refcount new_refcount = *(struct cache_node_refcount *)&old_refcount_uncast;
new_refcount.count += 1;
new_refcount.last_modified_cycle = evictor_cycle;
u64 v = atomic_u64_eval_compare_exchange(&cache_node->refcount_struct, old_refcount_uncast, *(u64 *)&new_refcount);
if (v != old_refcount_uncast) {
old_refcount_uncast = v;
} else {
break;
}
} while (true);
/* Add reference to scope */
struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx); struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx);
atomic_i32_inc_eval(&cache_node->refcount);
if (tctx->first_free_reference) { if (tctx->first_free_reference) {
ref = tctx->first_free_reference; ref = tctx->first_free_reference;
tctx->first_free_reference = ref->next_free; tctx->first_free_reference = ref->next_free;
@ -394,14 +415,25 @@ struct sheet_scope *sheet_scope_begin(void)
void sheet_scope_end(struct sheet_scope *scope) void sheet_scope_end(struct sheet_scope *scope)
{ {
struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx); struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx);
sys_timestamp_t cur_timestamp = sys_timestamp();
for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) { for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
struct sheet_scope_reference *ref = scope->reference_buckets[i]; struct sheet_scope_reference *ref = scope->reference_buckets[i];
while (ref) { while (ref) {
if (atomic_i32_dec_eval(&ref->cache_node->refcount) == 0) { /* Decrement refcount */
/* Refcount is now 0, mark timestamp on cache node */ struct cache_node *cache_node = ref->cache_node;
atomic_u64_eval_exchange(&ref->cache_node->last_refcount0_ts, cur_timestamp); u32 evictor_cycle = atomic_u32_eval(&G.evictor_cycle);
u64 old_refcount_uncast = atomic_u64_eval(&cache_node->refcount_struct);
do {
struct cache_node_refcount new_refcount = *(struct cache_node_refcount *)&old_refcount_uncast;
new_refcount.count -= 1;
new_refcount.last_modified_cycle = evictor_cycle;
u64 v = atomic_u64_eval_compare_exchange(&cache_node->refcount_struct, old_refcount_uncast, *(u64 *)&new_refcount);
if (v != old_refcount_uncast) {
old_refcount_uncast = v;
} else {
break;
} }
} while (true);
/* Add reference to free list */
ref->next_free = tctx->first_free_reference; ref->next_free = tctx->first_free_reference;
tctx->first_free_reference = ref; tctx->first_free_reference = ref;
ref = ref->next_hash; ref = ref->next_hash;
@ -622,11 +654,11 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_loader_thread_entry_point, arg)
struct evict_node { struct evict_node {
b32 force_evict; b32 force_evict;
u64 last_used_ts; struct cache_node_refcount refcount;
struct cache_node *cache_node; struct cache_node *cache_node;
struct cache_bucket *cache_bucket; struct cache_bucket *cache_bucket;
struct evict_node *next_unsorted; struct evict_node *next_consider;
struct evict_node *next_sorted; struct evict_node *next_consider_lru;
struct evict_node *next_evicted; struct evict_node *next_evicted;
}; };
@ -637,8 +669,8 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg)
while (true) { while (true) {
struct temp_arena scratch = scratch_begin_no_conflict(); struct temp_arena scratch = scratch_begin_no_conflict();
struct evict_node *head_unsorted = NULL; struct evict_node *head_consider = NULL;
struct evict_node *oldest_sorted = NULL; struct evict_node *head_consider_lru = NULL;
struct evict_node *head_evicted = NULL; struct evict_node *head_evicted = NULL;
b32 abort = false; b32 abort = false;
@ -649,19 +681,17 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg)
abort = true; abort = true;
} else { } else {
/* Wait */ /* Wait */
sys_condition_variable_wait_time(&G.evictor_cv, &G.evictor_mutex, EVICTOR_CHECK_INTERVAl); sys_condition_variable_wait_time(&G.evictor_cv, &G.evictor_mutex, EVICTOR_CYCLE_INTERVAL);
} }
if (!G.evictor_shutdown) { if (!G.evictor_shutdown) {
sys_timestamp_t cur_timestamp = sys_timestamp(); sys_timestamp_t cur_timestamp = sys_timestamp();
f64 cur_time = sys_timestamp_seconds(cur_timestamp); f64 cur_time = sys_timestamp_seconds(cur_timestamp);
b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
/* Scan for evictable nodes */ /* Scan for evictable nodes */
{ b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
if (cache_over_budget || RESOURCE_RELOADING) {
__profscope(eviction_scan); __profscope(eviction_scan);
for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) { for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
struct cache_bucket *bucket = &G.cache.buckets[i]; struct cache_bucket *bucket = &G.cache.buckets[i];
sys_rw_mutex_lock_shared(&bucket->rw_mutex); sys_rw_mutex_lock_shared(&bucket->rw_mutex);
@ -671,37 +701,41 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg)
b32 consider_for_eviction = false; b32 consider_for_eviction = false;
b32 force_evict = false; b32 force_evict = false;
if (atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) { if (atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) {
sys_timestamp_t last_used_ts = atomic_u64_eval(&n->last_refcount0_ts); u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct);
struct cache_node_refcount refcount = *(struct cache_node_refcount *)&refcount_uncast;
if (atomic_i32_eval(&n->refcount) <= 0) { if (refcount.count <= 0) {
#if RESOURCE_RELOADING #if RESOURCE_RELOADING
/* Check if file changed for resource reloading */ /* Check if file changed for resource reloading */
if (!consider_for_eviction) { if (!consider_for_eviction) {
struct string path = string_from_cstr_len((char *)n->tag_path, n->tag_path_len); struct string path = string_from_cstr_len((char *)n->tag_path, n->tag_path_len);
b32 file_changed = false;
if (!sys_is_file(path)) { if (!sys_is_file(path)) {
consider_for_eviction = true; file_changed = true;
} else { } else {
struct sys_datetime current_file_time;
{
struct sys_file file = sys_file_open_read(path); struct sys_file file = sys_file_open_read(path);
struct sys_file_time ft = sys_file_get_time(file); current_file_time = sys_file_get_time(file).modified;
sys_file_close(file); sys_file_close(file);
}
struct sys_datetime initial_file_time = n->initial_resource_file_modified_time; file_changed = MEMCMP_STRUCT(&n->initial_resource_file_modified_time, &current_file_time) != 0;
struct sys_datetime current_file_time = ft.modified; }
if (file_changed) {
if (MEMCMP_STRUCT(&initial_file_time, &current_file_time) != 0) {
logf_info("Resource file for sheet \"%F\" has changed. Evicting to allow for reloading.", FMT_STR(path)); logf_info("Resource file for sheet \"%F\" has changed. Evicting to allow for reloading.", FMT_STR(path));
consider_for_eviction = true; consider_for_eviction = true;
force_evict = true; force_evict = true;
} }
} }
}
#endif #endif
/* Check usage time */ /* Check usage time */
if (!consider_for_eviction && cache_over_budget && (cur_time - sys_timestamp_seconds(last_used_ts) > EVICTOR_GRACE_PERIOD)) { if (!RESOURCE_RELOADING || (!consider_for_eviction && cache_over_budget)) {
f64 last_used_time = (f64)refcount.last_modified_cycle * EVICTOR_CYCLE_INTERVAL;
if (cur_time - last_used_time > EVICTOR_GRACE_PERIOD) {
/* Cache is over budget and node hasn't been referenced in a while */ /* Cache is over budget and node hasn't been referenced in a while */
consider_for_eviction = true; consider_for_eviction = true;
} }
}
} }
@ -710,10 +744,10 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg)
struct evict_node *evict_node = arena_push_zero(scratch.arena, struct evict_node); struct evict_node *evict_node = arena_push_zero(scratch.arena, struct evict_node);
evict_node->cache_node = n; evict_node->cache_node = n;
evict_node->cache_bucket = bucket; evict_node->cache_bucket = bucket;
evict_node->last_used_ts = last_used_ts; evict_node->refcount = refcount;
evict_node->force_evict = force_evict; evict_node->force_evict = force_evict;
evict_node->next_unsorted = head_unsorted; evict_node->next_consider = head_consider;
head_unsorted = evict_node; head_consider = evict_node;
} }
n = n->next_hash; n = n->next_hash;
@ -725,41 +759,42 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg)
} }
/* Sort evict nodes by usage time */ /* Sort evict nodes by usage time */
{ if (head_consider) {
/* TODO: Optimize sort if necessary. Currently O(n^2). */ /* TODO: Optimize sort if necessary. Currently O(n^2). */
__profscope(eviction_sort); __profscope(eviction_sort);
for (struct evict_node *en = head_unsorted; en; en = en->next_unsorted) { for (struct evict_node *en = head_consider; en; en = en->next_consider) {
sys_timestamp_t ts = en->last_used_ts; u32 last_modified_cycle = en->refcount.last_modified_cycle;
struct evict_node *prev = NULL; struct evict_node *prev = NULL;
struct evict_node *next = oldest_sorted; struct evict_node *next = head_consider_lru;
while (next && !(ts <= next->last_used_ts || en->force_evict)) { while (next && !(last_modified_cycle <= next->refcount.last_modified_cycle || en->force_evict)) {
prev = next; prev = next;
next = next->next_sorted; next = next->next_consider_lru;
} }
if (prev) { if (prev) {
prev->next_sorted = en; prev->next_consider_lru = en;
} else { } else {
oldest_sorted = en; head_consider_lru = en;
} }
en->next_sorted = next; en->next_consider_lru = next;
} }
} }
/* Remove evictable nodes from cache table until under budget */ /* Remove evictable nodes from cache table until under budget */
{ if (head_consider_lru) {
__profscope(eviction_cache_removal); __profscope(eviction_cache_removal);
for (struct evict_node *en = oldest_sorted; en; en = en->next_sorted) { for (struct evict_node *en = head_consider_lru; en; en = en->next_consider_lru) {
struct cache_bucket *bucket = en->cache_bucket; struct cache_bucket *bucket = en->cache_bucket;
struct cache_node *n = en->cache_node; struct cache_node *n = en->cache_node;
sys_rw_mutex_lock_exclusive(&bucket->rw_mutex); sys_rw_mutex_lock_exclusive(&bucket->rw_mutex);
{ {
if (*atomic_i32_raw(&n->refcount) > 0 || (!en->force_evict && (*atomic_u64_raw(&n->last_refcount0_ts) != en->last_used_ts))) { struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct);
if (refcount.count > 0 || ((refcount.last_modified_cycle != en->refcount.last_modified_cycle) && !en->force_evict)) {
/* Cache node has been referenced since scan, skip eviction. */ /* Cache node has been referenced since scan, skip eviction. */
continue; continue;
} }
if (en->force_evict || atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET) { if (en->force_evict || atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET) {
/* Remove from cache table */ /* Remove from cache bucket */
if (n->prev_hash) { if (n->prev_hash) {
n->prev_hash->next_hash = n->next_hash; n->prev_hash->next_hash = n->next_hash;
} else { } else {
@ -804,10 +839,10 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg)
sys_mutex_unlock(&G.cache.node_pool_mutex); sys_mutex_unlock(&G.cache.node_pool_mutex);
} }
} }
atomic_u32_inc_eval(&G.evictor_cycle);
} }
sys_mutex_unlock(&G.evictor_mutex); sys_mutex_unlock(&G.evictor_mutex);
scratch_end(scratch); scratch_end(scratch);
if (abort) { if (abort) {
break; break;
} }