diff --git a/src/sheet.c b/src/sheet.c index 5520817f..fc8a57a6 100644 --- a/src/sheet.c +++ b/src/sheet.c @@ -16,8 +16,8 @@ #define SHEET_LOOKUP_TABLE_BUCKET_RATIO 2.0 #define TCTX_ARENA_RESERVE MEGABYTE(64) -#define CACHE_BUCKETS_COUNT 256 -#define SCOPE_BUCKETS_COUNT 256 +#define CACHE_BUCKETS_COUNT 1024 +#define SCOPE_BUCKETS_COUNT 1024 #define MAX_LOADER_THREADS 4 @@ -30,7 +30,7 @@ #define EVICTOR_CHECK_INTERVAl 0.500 /* Time a cache entry spends unused it's considered evictable */ -#define EVICTOR_GRACE_TIME 10.000 +#define EVICTOR_GRACE_PERIOD 10.000 /* ========================== * * Loader cmd structs @@ -335,6 +335,8 @@ INTERNAL void sheet_load(struct cache_node *n, struct sheet_tag tag) INTERNAL void scope_ensure_reference(struct sheet_scope *scope, struct cache_node *cache_node, u64 cache_bucket_index) { + __prof; + struct sheet_scope_reference **ref_next = &scope->reference_buckets[cache_bucket_index]; struct sheet_scope_reference *ref = *ref_next; while (ref) { @@ -408,6 +410,8 @@ void sheet_scope_end(struct sheet_scope *scope) INTERNAL struct cache_node *node_lookup_touch(struct sheet_scope *scope, struct sheet_tag tag) { + __prof; + struct cache_node *n = NULL; struct cache_node *nonmatching = NULL; struct cache_node **nonmatching_next = NULL; @@ -435,6 +439,7 @@ INTERNAL struct cache_node *node_lookup_touch(struct sheet_scope *scope, struct /* Allocate new node if necessary */ if (!n) { + __profscope(node_lookup_allocate); sys_rw_mutex_lock_exclusive(&G.cache.rw_mutex); { if (G.cache.first_free) { @@ -511,11 +516,13 @@ INTERNAL struct sheet *sheet_from_tag_internal(struct sheet_scope *scope, struct struct sheet *sheet_from_tag_await(struct sheet_scope *scope, struct sheet_tag tag) { + __prof; return sheet_from_tag_internal(scope, tag, true); } struct sheet *sheet_from_tag_async(struct sheet_scope *scope, struct sheet_tag tag) { + __prof; return sheet_from_tag_internal(scope, tag, false); } @@ -610,8 +617,15 @@ INTERNAL 
SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg) { (UNUSED)arg; + struct evict_node { + struct cache_node *cache_node; + struct evict_node *next; + b32 evicted; + }; + while (true) { struct temp_arena scratch = scratch_begin_no_conflict(); + struct evict_node *head_evict_node = NULL; b32 abort_thread_loop = false; sys_mutex_lock(&G.evictor_mutex); @@ -628,24 +642,30 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg) sys_timestamp_t cur_timestamp = sys_timestamp(); f64 cur_time = sys_timestamp_seconds(cur_timestamp); - sys_rw_mutex_lock_exclusive(&G.cache.rw_mutex); + sys_rw_mutex_lock_shared(&G.cache.rw_mutex); { + __profscope(eviction_scan); + for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) { struct cache_node *n = G.cache.buckets[i]; while (n) { - if ((*atomic_u32_raw(&n->state) == CACHE_NODE_STATE_LOADED) && (*atomic_i32_raw(&n->refcount) <= 0)) { - b32 should_evict = false; + b32 should_evict = false; - /* TODO: Only evict if over memory budget (in LRU order until under memory budget) */ - sys_timestamp_t last_used_ts = atomic_u64_raw(&n->last_refcount0_ts); - f64 last_used_time = sys_timestamp_seconds(last_used_ts); - if (cur_time - last_used_time > EVICTOR_GRACE_TIME) { - /* Cache entry unused for too long */ - should_evict = true; + if ((atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) && (atomic_i32_eval(&n->refcount) <= 0)) { + /* Check usage time */ + if (!should_evict) { + /* TODO: Only evict if over memory budget (in LRU order until under memory budget) */ + sys_timestamp_t last_used_ts = atomic_u64_raw(&n->last_refcount0_ts); + f64 last_used_time = sys_timestamp_seconds(last_used_ts); + if (cur_time - last_used_time > EVICTOR_GRACE_PERIOD) { + /* Cache entry unused for too long */ + should_evict = true; + } } #if RESOURCE_RELOADING + /* Check for file changes for resource reloading */ if (!should_evict) { should_evict = true; struct string path = string_from_cstr_len((char *)n->tag_path, 
n->tag_path_len); @@ -657,41 +677,83 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg) struct sys_datetime initial_file_time = n->initial_resource_file_modified_time; struct sys_datetime current_file_time = ft.modified; - if (MEMCMP_STRUCT(&initial_file_time, &current_file_time) == 0) { - /* File unchanged */ - should_evict = false; - } else { + if (MEMCMP_STRUCT(&initial_file_time, &current_file_time) != 0) { logf_info("Resource file for sheet \"%F\" has changed. Evicting to allow for reloading.", FMT_STR(path)); should_evict = true; + } else { + /* File unchanged */ + should_evict = false; + } } } #endif if (should_evict) { - __profscope(evict_sheet); - *atomic_u32_raw(&n->state) = CACHE_NODE_STATE_EVICTED; - /* Remove from hash table */ - if (n->prev_hash) { - n->prev_hash->next_hash = n->next_hash; - } else { - u64 cache_bucket_index = n->hash % CACHE_BUCKETS_COUNT; - G.cache.buckets[cache_bucket_index] = n->next_hash; - } - if (n->next_hash) { - n->next_hash->prev_hash = n->prev_hash; - } - /* Add to free list */ - n->next_free = G.cache.first_free; - G.cache.first_free = n; - arena_release(&n->arena); + /* Add to evict list */ + struct evict_node *evict_node = arena_push_zero(scratch.arena, struct evict_node); + evict_node->cache_node = n; + evict_node->next = head_evict_node; + head_evict_node = evict_node; } + + n = n->next_hash; } - n = n->next_hash; } } } - sys_rw_mutex_unlock_exclusive(&G.cache.rw_mutex); + sys_rw_mutex_unlock_shared(&G.cache.rw_mutex); + + if (head_evict_node) { + /* Scan for evictable nodes and remove from cache table */ + sys_rw_mutex_lock_exclusive(&G.cache.rw_mutex); + { + __profscope(eviction_cache_removal); + for (struct evict_node *en = head_evict_node; en; en = en->next) { + struct cache_node *n = en->cache_node; + /* Check that cache node is still loaded and unreferenced */ + if ((atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) && (atomic_i32_eval(&n->refcount) <= 0)) { + /* Remove from cache table */ 
+ if (n->prev_hash) { + n->prev_hash->next_hash = n->next_hash; + } else { + u64 cache_bucket_index = n->hash % CACHE_BUCKETS_COUNT; + G.cache.buckets[cache_bucket_index] = n->next_hash; + } + if (n->next_hash) { + n->next_hash->prev_hash = n->prev_hash; + } + en->evicted = true; + } + } + } + sys_rw_mutex_unlock_exclusive(&G.cache.rw_mutex); + + /* Free evicted node memory */ + { + __profscope(eviction_memory_release); + for (struct evict_node *en = head_evict_node; en; en = en->next) { + if (en->evicted) { + struct cache_node *n = en->cache_node; + arena_release(&n->arena); + } + } + } + + /* Add evicted nodes to free list */ + /* TODO: Create and lock separate mutex for free list */ + sys_rw_mutex_lock_exclusive(&G.cache.rw_mutex); + { + __profscope(eviction_free_list_append); + for (struct evict_node *en = head_evict_node; en; en = en->next) { + struct cache_node *n = en->cache_node; + if (en->evicted) { + n->next_free = G.cache.first_free; + G.cache.first_free = n; + } + } + } + sys_rw_mutex_unlock_exclusive(&G.cache.rw_mutex); + } } abort_thread_loop: sys_mutex_unlock(&G.evictor_mutex); diff --git a/src/sys_win32.c b/src/sys_win32.c index 512a94f2..35ae2caf 100644 --- a/src/sys_win32.c +++ b/src/sys_win32.c @@ -485,6 +485,8 @@ u64 sys_file_get_size(struct sys_file file) struct sys_file_time sys_file_get_time(struct sys_file file) { + __prof; + /* Get file times */ FILETIME ft_created; FILETIME ft_accessed;