diff --git a/src/sheet.c b/src/sheet.c index deb4ab72..aa5f4742 100644 --- a/src/sheet.c +++ b/src/sheet.c @@ -80,11 +80,16 @@ struct cache_node { #endif }; -struct cache { +struct cache_bucket { struct sys_rw_mutex rw_mutex; + struct cache_node *first; +}; + +struct cache { struct arena arena; - struct cache_node **buckets; - struct cache_node *first_free; + struct cache_bucket *buckets; + struct sys_mutex node_pool_mutex; + struct cache_node *node_pool_first_free; }; struct sheet_scope_reference { @@ -166,9 +171,12 @@ struct sheet_startup_receipt sheet_startup(struct work_startup_receipt *work_sr, (UNUSED)asset_cache_sr; (UNUSED)resource_sr; - G.cache.rw_mutex = sys_rw_mutex_alloc(); + G.cache.node_pool_mutex = sys_mutex_alloc(); G.cache.arena = arena_alloc(GIGABYTE(64)); - G.cache.buckets = arena_push_array_zero(&G.cache.arena, struct cache_node *, CACHE_BUCKETS_COUNT); + G.cache.buckets = arena_push_array_zero(&G.cache.arena, struct cache_bucket, CACHE_BUCKETS_COUNT); + for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) { + G.cache.buckets[i].rw_mutex = sys_rw_mutex_alloc(); + } G.loader_cmd_arena = arena_alloc(GIGABYTE(64)); G.loaders_mutex = sys_mutex_alloc(); @@ -335,7 +343,6 @@ INTERNAL void sheet_load(struct cache_node *n, struct sheet_tag tag) INTERNAL void scope_ensure_reference(struct sheet_scope *scope, struct cache_node *cache_node, u64 cache_bucket_index) { __prof; - struct sheet_scope_reference **ref_next = &scope->reference_buckets[cache_bucket_index]; struct sheet_scope_reference *ref = *ref_next; while (ref) { @@ -416,12 +423,13 @@ INTERNAL struct cache_node *node_lookup_touch(struct sheet_scope *scope, struct struct cache_node **nonmatching_next = NULL; u64 cache_bucket_index = tag.hash % CACHE_BUCKETS_COUNT; + struct cache_bucket *bucket = &G.cache.buckets[cache_bucket_index]; /* Lookup */ /* TODO: Spinlock */ - sys_rw_mutex_lock_shared(&G.cache.rw_mutex); + sys_rw_mutex_lock_shared(&bucket->rw_mutex); { - nonmatching_next = 
&G.cache.buckets[cache_bucket_index]; + nonmatching_next = &bucket->first; n = *nonmatching_next; while (n) { if (n->hash == tag.hash) { @@ -434,20 +442,26 @@ INTERNAL struct cache_node *node_lookup_touch(struct sheet_scope *scope, struct } } } - sys_rw_mutex_unlock_shared(&G.cache.rw_mutex); + sys_rw_mutex_unlock_shared(&bucket->rw_mutex); /* Allocate new node if necessary */ if (!n) { __profscope(node_lookup_allocate); - sys_rw_mutex_lock_exclusive(&G.cache.rw_mutex); + sys_rw_mutex_lock_exclusive(&bucket->rw_mutex); { - if (G.cache.first_free) { - n = G.cache.first_free; - G.cache.first_free = n->next_free; - MEMZERO_STRUCT(n); - } else { - n = arena_push_zero(&G.cache.arena, struct cache_node); + /* Alloc node */ + sys_mutex_lock(&G.cache.node_pool_mutex); + { + if (G.cache.node_pool_first_free) { + n = G.cache.node_pool_first_free; + G.cache.node_pool_first_free = n->next_free; + MEMZERO_STRUCT(n); + } else { + n = arena_push_zero(&G.cache.arena, struct cache_node); + } } + sys_mutex_unlock(&G.cache.node_pool_mutex); + /* Init node and add to bucket */ scope_ensure_reference(scope, n, cache_bucket_index); *nonmatching_next = n; if (nonmatching) { @@ -456,7 +470,7 @@ INTERNAL struct cache_node *node_lookup_touch(struct sheet_scope *scope, struct } n->hash = tag.hash; } - sys_rw_mutex_unlock_exclusive(&G.cache.rw_mutex); + sys_rw_mutex_unlock_exclusive(&bucket->rw_mutex); } return n; @@ -574,7 +588,13 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_loader_thread_entry_point, arg) break; } else if (!G.first_loader_cmd) { /* Wait for work */ +#if 1 sys_condition_variable_wait(&G.loaders_cv, &G.loaders_mutex); +#else + sys_mutex_unlock(&G.loaders_mutex); + sys_sleep(0.5); + sys_mutex_lock(&G.loaders_mutex); +#endif } @@ -611,14 +631,22 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg) (UNUSED)arg; struct evict_node { + b32 evicted; struct cache_node *cache_node; struct evict_node *next; - b32 evicted; + }; + + struct evict_bucket 
{ + b32 contains_evicted_node; + struct cache_bucket *cache_bucket; + struct evict_node *head; + struct evict_bucket *next; }; while (true) { struct temp_arena scratch = scratch_begin_no_conflict(); - struct evict_node *head_evict_node = NULL; + struct evict_bucket *head_evict_bucket = NULL; + b32 any_node_evicted = false; b32 abort_thread_loop = false; sys_mutex_lock(&G.evictor_mutex); @@ -635,122 +663,145 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg) sys_timestamp_t cur_timestamp = sys_timestamp(); f64 cur_time = sys_timestamp_seconds(cur_timestamp); - sys_rw_mutex_lock_shared(&G.cache.rw_mutex); + /* Scan for evictable nodes */ { __profscope(eviction_scan); for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) { - struct cache_node *n = G.cache.buckets[i]; + struct cache_bucket *bucket = &G.cache.buckets[i]; + struct evict_node *head_evict_node = NULL; + sys_rw_mutex_lock_shared(&bucket->rw_mutex); + { + struct cache_node *n = bucket->first; + while (n) { + b32 should_evict_node = false; - while (n) { - b32 should_evict = false; - - if ((atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) && (atomic_i32_eval(&n->refcount) <= 0)) { - /* Check usage time */ - if (!should_evict) { - /* TODO: Only evict if over memory budget (in LRU order until under memory budget) */ - sys_timestamp_t last_used_ts = atomic_u64_raw(&n->last_refcount0_ts); - f64 last_used_time = sys_timestamp_seconds(last_used_ts); - if (cur_time - last_used_time > EVICTOR_GRACE_PERIOD) { - /* Cache entry unused for too long */ - should_evict = true; - } - } - -#if RESOURCE_RELOADING - /* Check for file changes for resource reloading */ - if (!should_evict) { - should_evict = true; - struct string path = string_from_cstr_len((char *)n->tag_path, n->tag_path_len); - if (sys_is_file(path)) { - struct sys_file file = sys_file_open_read(path); - struct sys_file_time ft = sys_file_get_time(file); - sys_file_close(file); - - struct sys_datetime initial_file_time = 
n->initial_resource_file_modified_time; - struct sys_datetime current_file_time = ft.modified; - - if (MEMCMP_STRUCT(&initial_file_time, &current_file_time) != 0) { - logf_info("Resource file for sheet \"%F\" has changed. Evicting to allow for reloading.", FMT_STR(path)); - should_evict = true; - } else { - /* File unchanged */ - should_evict = false; + if ((atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) && (atomic_i32_eval(&n->refcount) <= 0)) { + /* Check usage time */ + if (!should_evict_node) { + /* TODO: Only evict if over memory budget (in LRU order until under memory budget) */ + sys_timestamp_t last_used_ts = atomic_u64_raw(&n->last_refcount0_ts); + f64 last_used_time = sys_timestamp_seconds(last_used_ts); + if (cur_time - last_used_time > EVICTOR_GRACE_PERIOD) { + /* Cache entry unused for too long */ + should_evict_node = true; + } + } + +#if RESOURCE_RELOADING + /* Check for file changes for resource reloading */ + if (!should_evict_node) { + should_evict_node = true; + struct string path = string_from_cstr_len((char *)n->tag_path, n->tag_path_len); + if (sys_is_file(path)) { + struct sys_file file = sys_file_open_read(path); + struct sys_file_time ft = sys_file_get_time(file); + sys_file_close(file); + + struct sys_datetime initial_file_time = n->initial_resource_file_modified_time; + struct sys_datetime current_file_time = ft.modified; + + if (MEMCMP_STRUCT(&initial_file_time, &current_file_time) != 0) { + logf_info("Resource file for sheet \"%F\" has changed. 
Evicting to allow for reloading.", FMT_STR(path)); + should_evict_node = true; + } else { + /* File unchanged */ + should_evict_node = false; + } } } - } #endif + /* Add node to bucket evict list */ + if (should_evict_node) { + struct evict_node *evict_node = arena_push_zero(scratch.arena, struct evict_node); + evict_node->cache_node = n; + evict_node->next = head_evict_node; + head_evict_node = evict_node; + } - if (should_evict) { - /* Add to evict list */ - struct evict_node *evict_node = arena_push_zero(scratch.arena, struct evict_node); - evict_node->cache_node = n; - evict_node->next = head_evict_node; - head_evict_node = evict_node; + n = n->next_hash; } - - n = n->next_hash; } } + sys_rw_mutex_unlock_shared(&bucket->rw_mutex); + + /* Add bucket with nodes to evict list */ + if (head_evict_node) { + struct evict_bucket *evict_bucket = arena_push_zero(scratch.arena, struct evict_bucket); + evict_bucket->cache_bucket = bucket; + evict_bucket->head = head_evict_node; + evict_bucket->next = head_evict_bucket; + head_evict_bucket = evict_bucket; + } } } - sys_rw_mutex_unlock_shared(&G.cache.rw_mutex); - if (head_evict_node) { - /* Remove evictable nodes from cache table */ - sys_rw_mutex_lock_exclusive(&G.cache.rw_mutex); - { - __profscope(eviction_cache_removal); - for (struct evict_node *en = head_evict_node; en; en = en->next) { - struct cache_node *n = en->cache_node; - /* Check that cache node is still loaded and unreferenced */ - if ((atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) && (atomic_i32_eval(&n->refcount) <= 0)) { - /* Remove from cache table */ - if (n->prev_hash) { - n->prev_hash->next_hash = n->next_hash; - } else { - u64 cache_bucket_index = n->hash % CACHE_BUCKETS_COUNT; - G.cache.buckets[cache_bucket_index] = n->next_hash; + /* Remove evictable nodes from cache table */ + { + __profscope(eviction_cache_removal); + for (struct evict_bucket *eb = head_evict_bucket; eb; eb = eb->next) { + struct cache_bucket *bucket = eb->cache_bucket; + 
sys_rw_mutex_lock_exclusive(&bucket->rw_mutex); + { + for (struct evict_node *en = eb->head; en; en = en->next) { + struct cache_node *n = en->cache_node; + /* Check that cache node is still loaded and unreferenced */ + if ((atomic_u32_eval(&n->state) == CACHE_NODE_STATE_LOADED) && (atomic_i32_eval(&n->refcount) <= 0)) { + /* Remove from cache table */ + if (n->prev_hash) { + n->prev_hash->next_hash = n->next_hash; + } else { + bucket->first = n->next_hash; + } + if (n->next_hash) { + n->next_hash->prev_hash = n->prev_hash; + } + en->evicted = true; + eb->contains_evicted_node = true; + any_node_evicted = true; } - if (n->next_hash) { - n->next_hash->prev_hash = n->prev_hash; - } - en->evicted = true; } } + sys_rw_mutex_unlock_exclusive(&bucket->rw_mutex); } - sys_rw_mutex_unlock_exclusive(&G.cache.rw_mutex); + } - /* Free evicted node memory */ - { - __profscope(eviction_memory_release); - for (struct evict_node *en = head_evict_node; en; en = en->next) { - if (en->evicted) { - struct cache_node *n = en->cache_node; - arena_release(&n->arena); + if (any_node_evicted) { + /* Release evicted node memory */ + __profscope(eviction_memory_release); + for (struct evict_bucket *eb = head_evict_bucket; eb; eb = eb->next) { + if (eb->contains_evicted_node) { + for (struct evict_node *en = eb->head; en; en = en->next) { + if (en->evicted) { + struct cache_node *n = en->cache_node; + arena_release(&n->arena); + } } } } /* Add evicted nodes to free list */ - /* TODO: Create and lock separate mutex for free list */ - sys_rw_mutex_lock_exclusive(&G.cache.rw_mutex); + __profscope(eviction_free_list_append); + sys_mutex_lock(&G.cache.node_pool_mutex); { - __profscope(eviction_free_list_append); - for (struct evict_node *en = head_evict_node; en; en = en->next) { - struct cache_node *n = en->cache_node; - if (en->evicted) { - n->next_free = G.cache.first_free; - G.cache.first_free = n; + for (struct evict_bucket *eb = head_evict_bucket; eb; eb = eb->next) { + if 
(eb->contains_evicted_node) { + for (struct evict_node *en = eb->head; en; en = en->next) { + if (en->evicted) { + struct cache_node *n = en->cache_node; + n->next_free = G.cache.node_pool_first_free; + G.cache.node_pool_first_free = n; + } + } } } } - sys_rw_mutex_unlock_exclusive(&G.cache.rw_mutex); + sys_mutex_unlock(&G.cache.node_pool_mutex); } } abort_thread_loop: - sys_mutex_unlock(&G.evictor_mutex); + sys_mutex_unlock(&G.evictor_mutex); scratch_end(scratch); if (abort_thread_loop) {