From f213593cdc48ebab982513a27f284a864abbd13c Mon Sep 17 00:00:00 2001 From: jacob Date: Thu, 15 May 2025 00:05:23 -0500 Subject: [PATCH] track sprite cache bin num_out_of_date_entries --- src/sprite.c | 54 ++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/src/sprite.c b/src/sprite.c index 916c5571..2376dd07 100644 --- a/src/sprite.c +++ b/src/sprite.c @@ -71,6 +71,7 @@ struct cache_entry { struct atomic_u64 refcount_struct; /* Cast eval to `cache_refcount` */ /* Allocated data */ + /* NOTE: This data should only be read once the entry state = loaded */ u64 memory_usage; struct arena arena; struct sprite_texture *texture; @@ -92,6 +93,9 @@ struct cache_bin { struct sys_mutex mutex; struct cache_entry *first; struct cache_entry *last; +#if RESOURCE_RELOADING + i32 num_out_of_date_entries; +#endif }; struct cache { @@ -697,7 +701,9 @@ INTERNAL void cache_ref_release(struct cache_ref ref) INTERNAL struct sprite_scope_cache_ref *scope_get_ref_or_null(struct sprite_scope *scope, struct cache_entry *e, u64 bin_index, struct sys_lock *bin_lock) { - sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex); /* Since entry may not have a reference, bin must be locked to ensure entry doesn't evict */ + /* Since entry may not have an existing reference, bin must be locked to ensure entry isn't evicted while adding first reference */ + sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex); + struct sprite_scope_cache_ref *node = scope->ref_node_bins[bin_index]; while (node) { if (node->ref.e == e) { @@ -712,7 +718,8 @@ INTERNAL struct sprite_scope_cache_ref *scope_get_ref_or_null(struct sprite_scop INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref(struct sprite_scope *scope, struct cache_entry *e, u64 bin_index, struct sys_lock *bin_lock) { - sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex); /* Since entry may not have a reference, bin must be locked to ensure entry 
doesn't evict */ + /* Since entry may not have an existing reference, bin must be locked to ensure entry isn't evicted while adding first reference */ + sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex); struct sprite_scope_cache_ref **slot = &scope->ref_node_bins[bin_index]; while (*slot) { @@ -811,12 +818,23 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_lookup_touch(struct sprite_s for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) { if (entry->hash.v == hash.v) { #if RESOURCE_RELOADING - b32 has_ref = scope_get_ref_or_null(scope, entry, bin_index, &bin_lock) != NULL; - if (has_ref) { + if (bin->num_out_of_date_entries <= 0) { + /* Common path */ match = entry; break; - } else if (!atomic_i32_eval(&entry->out_of_date)) { - match = entry; + } else { + /* If node is already referenced by the scope, then use it. + * Otherwise fall back to using any existing non out-of-date + * node that matches. Prioritizing existing references means + * that the associated entry will stay the same across a + * scope's lifetime. 
*/ + b32 has_ref = scope_get_ref_or_null(scope, entry, bin_index, &bin_lock) != NULL; + if (has_ref) { + match = entry; + break; + } else if (!atomic_i32_eval(&entry->out_of_date)) { + match = entry; + } } #else match = entry; @@ -1080,17 +1098,23 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg) INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name) { + b32 exists = false; struct sprite_tag tag = sprite_tag_from_path(name); for (enum cache_entry_kind kind = 0; kind < NUM_CACHE_ENTRY_KINDS; ++kind) { struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind); u64 bin_index = hash.v % CACHE_BINS_COUNT; struct cache_bin *bin = &G.cache.bins[bin_index]; - struct sys_lock lock = sys_mutex_lock_s(&bin->mutex); + struct sys_lock lock = sys_mutex_lock_e(&bin->mutex); /* Exclusive lock since we're modifying `bin->num_out_of_date_entries` */ { - for (struct cache_entry *n = bin->first; n; n = n->next_in_bin) { - if (n->hash.v == hash.v) { - logf_info("Sprite resource file \"%F\" has changed.", FMT_STR(name)); - atomic_i32_eval_exchange(&n->out_of_date, 1); + for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) { + if (entry->hash.v == hash.v) { + if (!exists) { + logf_info("Sprite resource file \"%F\" has changed.", FMT_STR(name)); + exists = true; + } + if (atomic_i32_eval_compare_exchange(&entry->out_of_date, 0, 1) == 0) { + ++bin->num_out_of_date_entries; + } } } } @@ -1200,9 +1224,9 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) struct evict_node *en = &evict_array[i]; struct cache_bin *bin = en->cache_bin; struct cache_entry *entry = en->cache_entry; - struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); i32 last_ref_cycle = en->last_ref_cycle; b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET; + struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); { struct cache_refcount refcount = *(struct 
cache_refcount *)atomic_u64_raw(&entry->refcount_struct); if (refcount.count > 0 || (last_ref_cycle > 0 && refcount.last_ref_cycle != en->last_ref_cycle)) { @@ -1221,7 +1245,13 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) } else { bin->last = prev; } + atomic_u64_eval_add_i64(&G.cache.memory_usage, -((i64)entry->memory_usage)); +#if RESOURCE_RELOADING + if (last_ref_cycle == 0) { + --bin->num_out_of_date_entries; + } +#endif /* Add to evicted list */ en->next_evicted = first_evicted;