diff --git a/src/sprite.c b/src/sprite.c index 2376dd07..796d5556 100644 --- a/src/sprite.c +++ b/src/sprite.c @@ -71,7 +71,8 @@ struct cache_entry { struct atomic_u64 refcount_struct; /* Cast eval to `cache_refcount` */ /* Allocated data */ - /* NOTE: This data should only be read once the entry state = loaded */ + /* NOTE: This data is finalized once entry state = loaded */ + i64 load_time_ns; u64 memory_usage; struct arena arena; struct sprite_texture *texture; @@ -93,9 +94,6 @@ struct cache_bin { struct sys_mutex mutex; struct cache_entry *first; struct cache_entry *last; -#if RESOURCE_RELOADING - i32 num_out_of_date_entries; -#endif }; struct cache { @@ -221,6 +219,7 @@ struct sprite_startup_receipt sprite_startup(struct renderer_startup_receipt *re { /* Init loading texture */ G.loading_texture = arena_push_zero(&G.perm_arena, struct sprite_texture); + /* Init nil texture */ G.nil_texture = arena_push_zero(&G.perm_arena, struct sprite_texture); G.nil_texture->loaded = true; @@ -336,7 +335,10 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t struct ase_decode_image_result decoded = ZI; if (resource_exists(path)) { struct resource texture_rs = resource_open(path); - decoded = ase_decode_image(scratch.arena, resource_get_data(&texture_rs)); + { + e->load_time_ns = sys_time_ns(); + decoded = ase_decode_image(scratch.arena, resource_get_data(&texture_rs)); + } resource_close(&texture_rs); /* Initialize */ @@ -632,7 +634,10 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag struct ase_decode_sheet_result decoded = ZI; if (resource_exists(path)) { struct resource sheet_rs = resource_open(path); - decoded = ase_decode_sheet(scratch.arena, resource_get_data(&sheet_rs)); + { + ref.e->load_time_ns = sys_time_ns(); + decoded = ase_decode_sheet(scratch.arena, resource_get_data(&sheet_rs)); + } resource_close(&sheet_rs); /* Initialize */ @@ -699,26 +704,10 @@ INTERNAL void cache_ref_release(struct cache_ref 
ref) * Scope * ========================== */ -INTERNAL struct sprite_scope_cache_ref *scope_get_ref_or_null(struct sprite_scope *scope, struct cache_entry *e, u64 bin_index, struct sys_lock *bin_lock) -{ - /* Since entry may not have an existing reference, bin must be locked to ensure entry isn't evicted while adding first reference */ - sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex); - - struct sprite_scope_cache_ref *node = scope->ref_node_bins[bin_index]; - while (node) { - if (node->ref.e == e) { - /* Found reference in scope */ - break; - } else { - node = node->next_in_bin; - } - } - return node; -} - -INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref(struct sprite_scope *scope, struct cache_entry *e, u64 bin_index, struct sys_lock *bin_lock) +INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref(struct sprite_scope *scope, struct cache_entry *e, struct sys_lock *bin_lock) { /* Since entry may not have an existing reference, bin must be locked to ensure entry isn't evicted while adding first reference */ + u64 bin_index = e->hash.v % CACHE_BINS_COUNT; sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex); struct sprite_scope_cache_ref **slot = &scope->ref_node_bins[bin_index]; @@ -740,11 +729,11 @@ INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref(struct sprite_scope *sc refcount_add(e, 1); /* Grab node from pool */ - struct sprite_scope_cache_ref *node = &scope->ref_node_pool[scope->num_references++]; - MEMZERO_STRUCT(node); - node->ref.e = e; + struct sprite_scope_cache_ref *scope_ref = &scope->ref_node_pool[scope->num_references++]; + MEMZERO_STRUCT(scope_ref); + scope_ref->ref.e = e; - *slot = node; + *slot = scope_ref; } return *slot; @@ -801,87 +790,107 @@ void sprite_scope_end(struct sprite_scope *scope) * Cache interface * ========================== */ -INTERNAL struct sprite_scope_cache_ref *cache_entry_lookup_touch(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind) 
+INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope, struct cache_entry_hash hash, struct sys_lock *bin_lock) +{ + struct sprite_scope_cache_ref *scope_ref = NULL; + + struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT]; + sys_assert_locked_e_or_s(bin_lock, &bin->mutex); /* Lock required for iterating bin */ + +#if RESOURCE_RELOADING + /* If resource reloading is enabled, then we want to find the + * newest entry rather than the first one that exists since + * there may be more than one matching entry in the cache */ + struct cache_entry *match = NULL; + enum cache_entry_state match_state = CACHE_ENTRY_STATE_NONE; + for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) { + if (entry->hash.v == hash.v) { + enum cache_entry_state entry_state = atomic_i32_eval(&entry->state); + if (!match || entry_state > match_state || (entry_state == CACHE_ENTRY_STATE_LOADED && match_state == CACHE_ENTRY_STATE_LOADED && entry->load_time_ns > match->load_time_ns)) { + match = entry; + match_state = entry_state; + } + } + } + if (match) { + scope_ref = scope_ensure_ref(scope, match, bin_lock); + } +#else + for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) { + if (entry->hash.v == hash.v) { + scope_ref = scope_ensure_ref(scope, entry, bin_lock); + break; + } + } +#endif + + return scope_ref; +} + +INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind) { __prof; - struct sprite_scope_cache_ref *scope_ref = NULL; - struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind); u64 bin_index = hash.v % CACHE_BINS_COUNT; - struct cache_bin *bin = &G.cache.bins[bin_index]; + struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT]; - /* Lookup */ - { - struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex); - struct cache_entry *match = NULL; - for (struct cache_entry *entry = 
bin->first; entry; entry = entry->next_in_bin) { - if (entry->hash.v == hash.v) { -#if RESOURCE_RELOADING - if (bin->num_out_of_date_entries <= 0) { - /* Common path */ - match = entry; - break; - } else { - /* If node is already referenced by the scope, then use it. - * Otherwise fall back to using any existing non out-of-date - * node that matches. Prioritizing existing references means - * that the associated entry will stay the same accross a - * scope's lifetime. */ - b32 has_ref = scope_get_ref_or_null(scope, entry, bin_index, &bin_lock) != NULL; - if (has_ref) { - match = entry; - break; - } else if (!atomic_i32_eval(&entry->out_of_date)) { - match = entry; - } - } -#else - match = entry; - break; -#endif - } + /* Search for entry in scope */ + struct sprite_scope_cache_ref *scope_ref = scope->ref_node_bins[bin_index]; + while (scope_ref) { + if (scope_ref->ref.e->hash.v == hash.v) { + break; } - if (match) { - scope_ref = scope_ensure_ref(scope, match, bin_index, &bin_lock); + scope_ref = scope_ref->next_in_bin; + } + + /* Search for entry in cache */ + if (!scope_ref) { + struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex); + { + scope_ref = cache_lookup(scope, hash, &bin_lock); } sys_mutex_unlock(&bin_lock); } - /* Allocate new entry if necessary */ + /* Allocate new entry */ if (!scope_ref) { - __profscope(entry_lookup_allocate); - struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); + struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); { - /* Alloc entry */ - struct cache_entry *entry = NULL; - { - struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.entry_pool_mutex); - if (G.cache.entry_pool_first_free) { - entry = G.cache.entry_pool_first_free; - G.cache.entry_pool_first_free = entry->next_free; - MEMZERO_STRUCT(entry); - } else { - entry = arena_push_zero(&G.cache.arena, struct cache_entry); + /* Search cache one more time in case an entry was allocated between locks */ + scope_ref = cache_lookup(scope, hash, &bin_lock); + if 
(!scope_ref) { + /* Cache entry still absent, allocate new entry */ + struct cache_entry *entry = NULL; + { + struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.entry_pool_mutex); + if (G.cache.entry_pool_first_free) { + entry = G.cache.entry_pool_first_free; + G.cache.entry_pool_first_free = entry->next_free; + } else { + entry = arena_push(&G.cache.arena, struct cache_entry); + } + sys_mutex_unlock(&pool_lock); } - sys_mutex_unlock(&pool_lock); - } + MEMZERO_STRUCT(entry); - /* Init node and add to bin */ - scope_ref = scope_ensure_ref(scope, entry, bin_index, &bin_lock); - { - if (bin->last) { - bin->last->next_in_bin = entry; - entry->prev_in_bin = bin->last; - } else { - bin->first = entry; + /* Init entry and add to bin */ + { + if (bin->last) { + bin->last->next_in_bin = entry; + entry->prev_in_bin = bin->last; + } else { + bin->first = entry; + } + bin->last = entry; } - bin->last = entry; + entry->hash = cache_entry_hash_from_tag_hash(tag.hash, kind); + entry->kind = kind; + entry->texture = G.nil_texture; + entry->sheet = G.nil_sheet; + + scope_ref = scope_ensure_ref(scope, entry, &bin_lock); } - entry->hash = cache_entry_hash_from_tag_hash(tag.hash, kind); - entry->kind = kind; - entry->texture = G.nil_texture; - entry->sheet = G.nil_sheet; } sys_mutex_unlock(&bin_lock); } @@ -899,7 +908,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_ default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break; } - struct sprite_scope_cache_ref *scope_ref = cache_entry_lookup_touch(scope, tag, kind); + struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind); struct cache_ref ref = scope_ref->ref; enum cache_entry_state state = atomic_i32_eval(&ref.e->state); @@ -1104,7 +1113,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name) struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind); u64 bin_index = hash.v % CACHE_BINS_COUNT; struct cache_bin 
*bin = &G.cache.bins[bin_index]; - struct sys_lock lock = sys_mutex_lock_e(&bin->mutex); /* Exclusive lock since we're modifying `bin->num_out_of_date_entries` */ + struct sys_lock lock = sys_mutex_lock_s(&bin->mutex); { for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) { if (entry->hash.v == hash.v) { @@ -1112,9 +1121,6 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name) logf_info("Sprite resource file \"%F\" has changed.", FMT_STR(name)); exists = true; } - if (atomic_i32_eval_compare_exchange(&entry->out_of_date, 0, 1) == 0) { - ++bin->num_out_of_date_entries; - } } } } @@ -1247,11 +1253,6 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) } atomic_u64_eval_add_i64(&G.cache.memory_usage, -((i64)entry->memory_usage)); -#if RESOURCE_RELOADING - if (last_ref_cycle == 0) { - --bin->num_out_of_date_entries; - } -#endif /* Add to evicted list */ en->next_evicted = first_evicted;