sprite hot-reload without prematurely unloading (remove reload flicker)

This commit is contained in:
jacob 2025-05-15 02:46:33 -05:00
parent 282f89f4fa
commit fafbfbfa6a
2 changed files with 194 additions and 149 deletions

View File

@ -26,7 +26,7 @@ CT_ASSERT(CACHE_MEMORY_BUDGET_THRESHOLD >= CACHE_MEMORY_BUDGET_TARGET);
/* How long between evictor thread scans */ /* How long between evictor thread scans */
#define EVICTOR_CYCLE_INTERVAL_NS NS_FROM_SECONDS(0.500) #define EVICTOR_CYCLE_INTERVAL_NS NS_FROM_SECONDS(0.500)
/* Cycles a cache entry spends unused until it's considered evictable */ /* How many cycles a cache entry spends unused until it's considered evictable */
#define EVICTOR_GRACE_PERIOD_CYCLES (NS_FROM_SECONDS(10.000) / EVICTOR_CYCLE_INTERVAL_NS) #define EVICTOR_GRACE_PERIOD_CYCLES (NS_FROM_SECONDS(10.000) / EVICTOR_CYCLE_INTERVAL_NS)
/* Texture arena only used to store texture struct at the moment. Actual image data is allocated on GPU. */ /* Texture arena only used to store texture struct at the moment. Actual image data is allocated on GPU. */
@ -56,7 +56,7 @@ enum cache_entry_state {
struct cache_refcount { struct cache_refcount {
i32 count; /* Number of scopes currently holding a reference to this entry */ i32 count; /* Number of scopes currently holding a reference to this entry */
i32 last_ref_cycle; /* Last time that refcount was modified */ i32 last_ref_cycle; /* Last evictor cycle that the refcount was modified */
}; };
CT_ASSERT(sizeof(struct cache_refcount) == 8); /* Must fit into 64 bit atomic */ CT_ASSERT(sizeof(struct cache_refcount) == 8); /* Must fit into 64 bit atomic */
@ -121,7 +121,8 @@ struct sprite_scope_cache_ref {
struct load_cmd { struct load_cmd {
struct load_cmd *next_free; struct load_cmd *next_free;
struct cache_ref ref; struct sprite_scope *scope;
struct sprite_scope_cache_ref *scope_ref;
struct sprite_tag tag; struct sprite_tag tag;
u8 tag_path_buff[512]; u8 tag_path_buff[512];
}; };
@ -311,6 +312,36 @@ INTERNAL struct cache_entry_hash cache_entry_hash_from_tag_hash(u64 tag_hash, en
* Load * Load
* ========================== */ * ========================== */
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_ref(struct sprite_scope *scope, struct cache_ref ref);
INTERNAL void push_load_task(struct cache_ref ref, struct sprite_tag tag)
{
	/* Queue an asynchronous load of the cache entry behind `ref`.
	 * A private scope holds a reference to the entry for the lifetime of the
	 * command so it cannot be evicted before the worker runs; the worker
	 * (sprite_load_task) ends that scope when the command is recycled. */
	struct load_cmd *cmd = NULL;
	{
		/* Pop a recycled command, or grow the arena, under the cmd mutex */
		struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
		if (G.first_free_load_cmd) {
			cmd = G.first_free_load_cmd;
			G.first_free_load_cmd = cmd->next_free;
		} else {
			cmd = arena_push(&G.load_cmds_arena, struct load_cmd);
		}
		sys_mutex_unlock(&lock);
	}
	MEMZERO_STRUCT(cmd);
	/* Initialize cmd */
	cmd->scope = sprite_scope_begin();
	cmd->scope_ref = scope_ensure_ref_from_ref(cmd->scope, ref);
	cmd->tag = tag;
	{
		/* Copy the tag path into the command's own buffer — the caller's
		 * string may not outlive this call. Clamp the stored length to the
		 * bytes actually copied so a truncated path can never be over-read
		 * (previously `cmd->tag.path.len` kept the unclamped source length). */
		u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
		cmd->tag.path.text = cmd->tag_path_buff;
		cmd->tag.path.len = copy_len;
		MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
	}
	/* Push work */
	work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL);
}
INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag tag) INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag tag)
{ {
__prof; __prof;
@ -335,10 +366,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
struct ase_decode_image_result decoded = ZI; struct ase_decode_image_result decoded = ZI;
if (resource_exists(path)) { if (resource_exists(path)) {
struct resource texture_rs = resource_open(path); struct resource texture_rs = resource_open(path);
{
e->load_time_ns = sys_time_ns();
decoded = ase_decode_image(scratch.arena, resource_get_data(&texture_rs)); decoded = ase_decode_image(scratch.arena, resource_get_data(&texture_rs));
}
resource_close(&texture_rs); resource_close(&texture_rs);
/* Initialize */ /* Initialize */
@ -367,6 +395,20 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_LOADED); atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_LOADED);
#if RESOURCE_RELOADING
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
{
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
if (old_entry->hash.v == e->hash.v) {
atomic_i32_eval_exchange(&old_entry->out_of_date, 1);
}
}
e->load_time_ns = sys_time_ns();
}
sys_mutex_unlock(&bin_lock);
#endif
scratch_end(scratch); scratch_end(scratch);
} }
@ -618,56 +660,68 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
{ {
__prof; __prof;
struct temp_arena scratch = scratch_begin_no_conflict(); struct temp_arena scratch = scratch_begin_no_conflict();
struct cache_entry *e = ref.e;
atomic_i32_eval_exchange(&ref.e->state, CACHE_ENTRY_STATE_WORKING); atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_WORKING);
struct string path = tag.path; struct string path = tag.path;
logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(ref.e->hash.v), FMT_STR(path)); logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path));
i64 start_ns = sys_time_ns(); i64 start_ns = sys_time_ns();
ASSERT(ref.e->kind == CACHE_ENTRY_KIND_SHEET); ASSERT(e->kind == CACHE_ENTRY_KIND_SHEET);
/* TODO: Replace arena allocs w/ buddy allocator */ /* TODO: Replace arena allocs w/ buddy allocator */
ref.e->arena = arena_alloc(SHEET_ARENA_RESERVE); e->arena = arena_alloc(SHEET_ARENA_RESERVE);
{ {
/* Decode */ /* Decode */
struct ase_decode_sheet_result decoded = ZI; struct ase_decode_sheet_result decoded = ZI;
if (resource_exists(path)) { if (resource_exists(path)) {
struct resource sheet_rs = resource_open(path); struct resource sheet_rs = resource_open(path);
{
ref.e->load_time_ns = sys_time_ns();
decoded = ase_decode_sheet(scratch.arena, resource_get_data(&sheet_rs)); decoded = ase_decode_sheet(scratch.arena, resource_get_data(&sheet_rs));
}
resource_close(&sheet_rs); resource_close(&sheet_rs);
/* Initialize */ /* Initialize */
ref.e->sheet = arena_push(&ref.e->arena, struct sprite_sheet); e->sheet = arena_push(&e->arena, struct sprite_sheet);
*ref.e->sheet = init_sheet_from_ase_result(&ref.e->arena, decoded); *e->sheet = init_sheet_from_ase_result(&e->arena, decoded);
ref.e->sheet->loaded = true; e->sheet->loaded = true;
ref.e->sheet->valid = true; e->sheet->valid = true;
} else { } else {
logf_error("Sprite \"%F\" not found", FMT_STR(path)); logf_error("Sprite \"%F\" not found", FMT_STR(path));
} }
} }
arena_set_readonly(&ref.e->arena); arena_set_readonly(&e->arena);
ref.e->memory_usage = ref.e->arena.committed; e->memory_usage = e->arena.committed;
atomic_u64_eval_add_u64(&G.cache.memory_usage, ref.e->memory_usage); atomic_u64_eval_add_u64(&G.cache.memory_usage, e->memory_usage);
f64 elapsed = SECONDS_FROM_NS(sys_time_ns() - start_ns); f64 elapsed = SECONDS_FROM_NS(sys_time_ns() - start_ns);
logf_info("Finished loading sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).", logf_info("Finished loading sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).",
FMT_HEX(ref.e->hash.v), FMT_HEX(e->hash.v),
FMT_STR(path), FMT_STR(path),
FMT_FLOAT(elapsed), FMT_FLOAT(elapsed),
FMT_UINT(ref.e->memory_usage)); FMT_UINT(e->memory_usage));
atomic_i32_eval_exchange(&ref.e->state, CACHE_ENTRY_STATE_LOADED); atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_LOADED);
#if RESOURCE_RELOADING
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
{
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
if (old_entry->hash.v == e->hash.v) {
atomic_i32_eval_exchange(&old_entry->out_of_date, 1);
}
}
e->load_time_ns = sys_time_ns();
}
sys_mutex_unlock(&bin_lock);
#endif
scratch_end(scratch); scratch_end(scratch);
} }
/* ========================== * /* ========================== *
* Cache ref * Scope
* ========================== */ * ========================== */
INTERNAL void refcount_add(struct cache_entry *e, i32 amount) INTERNAL void refcount_add(struct cache_entry *e, i32 amount)
@ -689,26 +743,9 @@ INTERNAL void refcount_add(struct cache_entry *e, i32 amount)
} while (true); } while (true);
} }
INTERNAL struct cache_ref cache_ref_alloc(struct cache_ref src_ref) INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_unsafe(struct sprite_scope *scope, struct cache_entry *e)
{ {
refcount_add(src_ref.e, 1);
return src_ref;
}
INTERNAL void cache_ref_release(struct cache_ref ref)
{
refcount_add(ref.e, -1);
}
/* ========================== *
* Scope
* ========================== */
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref(struct sprite_scope *scope, struct cache_entry *e, struct sys_lock *bin_lock)
{
/* Since entry may not have an existing reference, bin must be locked to ensure entry isn't evicted while adding first reference */
u64 bin_index = e->hash.v % CACHE_BINS_COUNT; u64 bin_index = e->hash.v % CACHE_BINS_COUNT;
sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex);
struct sprite_scope_cache_ref **slot = &scope->ref_node_bins[bin_index]; struct sprite_scope_cache_ref **slot = &scope->ref_node_bins[bin_index];
while (*slot) { while (*slot) {
@ -739,6 +776,19 @@ INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref(struct sprite_scope *sc
return *slot; return *slot;
} }
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_entry(struct sprite_scope *scope, struct cache_entry *e, struct sys_lock *bin_lock)
{
	/* The caller must hold the entry's bin lock: the entry may have no
	 * reference yet, and without the lock it could be evicted while the
	 * first reference is being installed. */
	struct sys_mutex *bin_mutex = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT].mutex;
	sys_assert_locked_e_or_s(bin_lock, bin_mutex);
	return scope_ensure_ref_unsafe(scope, e);
}
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_ref(struct sprite_scope *scope, struct cache_ref ref)
{
	/* Holding a cache_ref already pins the entry, so no bin lock is needed. */
	struct cache_entry *entry = ref.e;
	return scope_ensure_ref_unsafe(scope, entry);
}
struct sprite_scope *sprite_scope_begin(void) struct sprite_scope *sprite_scope_begin(void)
{ {
/* Alloc scope */ /* Alloc scope */
@ -813,12 +863,12 @@ INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope,
} }
} }
if (match) { if (match) {
scope_ref = scope_ensure_ref(scope, match, bin_lock); scope_ref = scope_ensure_ref_from_entry(scope, match, bin_lock);
} }
#else #else
for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) { for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) {
if (entry->hash.v == hash.v) { if (entry->hash.v == hash.v) {
scope_ref = scope_ensure_ref(scope, entry, &bin_lock); scope_ref = scope_ensure_ref_from_entry(scope, entry, bin_lock);
break; break;
} }
} }
@ -827,25 +877,31 @@ INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope,
return scope_ref; return scope_ref;
} }
INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind) INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind, b32 force_new)
{ {
__prof; __prof;
struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind); struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
u64 bin_index = hash.v % CACHE_BINS_COUNT; u64 bin_index = hash.v % CACHE_BINS_COUNT;
struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT]; struct sprite_scope_cache_ref *scope_ref = NULL;
/* Search for entry in scope */ /* Search for entry in scope */
struct sprite_scope_cache_ref *scope_ref = scope->ref_node_bins[bin_index]; if (!force_new) {
scope_ref = scope->ref_node_bins[bin_index];
while (scope_ref) { while (scope_ref) {
if (scope_ref->ref.e->hash.v == hash.v) { if (scope_ref->ref.e->hash.v == hash.v) {
break; break;
} }
scope_ref = scope_ref->next_in_bin; scope_ref = scope_ref->next_in_bin;
} }
}
/* Search for entry in cache */ /* If not in scope, search for entry in cache */
if (!scope_ref) { if (!scope_ref) {
struct cache_bin *bin = &G.cache.bins[bin_index];
/* Search in cache */
if (!force_new) {
struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex); struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex);
{ {
scope_ref = cache_lookup(scope, hash, &bin_lock); scope_ref = cache_lookup(scope, hash, &bin_lock);
@ -853,12 +909,15 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope
sys_mutex_unlock(&bin_lock); sys_mutex_unlock(&bin_lock);
} }
/* Allocate new entry */ /* If not in cache, allocate new entry */
if (!scope_ref) { if (!scope_ref) {
struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex); struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
{ {
/* Search cache one more time in case an entry was allocated between locks */ /* Search cache one more time in case an entry was allocated between locks */
if (!force_new) {
scope_ref = cache_lookup(scope, hash, &bin_lock); scope_ref = cache_lookup(scope, hash, &bin_lock);
}
if (!scope_ref) { if (!scope_ref) {
/* Cache entry still absent, allocate new entry */ /* Cache entry still absent, allocate new entry */
struct cache_entry *entry = NULL; struct cache_entry *entry = NULL;
@ -889,11 +948,13 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope
entry->texture = G.nil_texture; entry->texture = G.nil_texture;
entry->sheet = G.nil_sheet; entry->sheet = G.nil_sheet;
scope_ref = scope_ensure_ref(scope, entry, &bin_lock); scope_ref = scope_ensure_ref_from_entry(scope, entry, &bin_lock);
} }
} }
sys_mutex_unlock(&bin_lock); sys_mutex_unlock(&bin_lock);
} }
}
return scope_ref; return scope_ref;
} }
@ -908,7 +969,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break; default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
} }
struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind); struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind, false);
struct cache_ref ref = scope_ref->ref; struct cache_ref ref = scope_ref->ref;
enum cache_entry_state state = atomic_i32_eval(&ref.e->state); enum cache_entry_state state = atomic_i32_eval(&ref.e->state);
@ -936,30 +997,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
} }
} else { } else {
/* Allocate cmd */ /* Allocate cmd */
struct load_cmd *cmd = NULL; push_load_task(ref, tag);
{
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
if (G.first_free_load_cmd) {
cmd = G.first_free_load_cmd;
G.first_free_load_cmd = cmd->next_free;
} else {
cmd = arena_push(&G.load_cmds_arena, struct load_cmd);
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(cmd);
/* Initialize cmd */
cmd->ref = cache_ref_alloc(ref);
cmd->tag = tag;
{
u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
cmd->tag.path.text = cmd->tag_path_buff;
MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
}
/* Push work */
work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL);
} }
} }
} }
@ -1077,7 +1115,7 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
{ {
__prof; __prof;
struct load_cmd *cmd = (struct load_cmd *)arg; struct load_cmd *cmd = (struct load_cmd *)arg;
struct cache_ref ref = cmd->ref; struct cache_ref ref = cmd->scope_ref->ref;
switch (ref.e->kind) { switch (ref.e->kind) {
case CACHE_ENTRY_KIND_TEXTURE: { case CACHE_ENTRY_KIND_TEXTURE: {
@ -1092,7 +1130,7 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
/* Free cmd */ /* Free cmd */
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex); struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
{ {
cache_ref_release(cmd->ref); sprite_scope_end(cmd->scope);
cmd->next_free = G.first_free_load_cmd; cmd->next_free = G.first_free_load_cmd;
G.first_free_load_cmd = cmd; G.first_free_load_cmd = cmd;
} }
@ -1105,27 +1143,34 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
#if RESOURCE_RELOADING #if RESOURCE_RELOADING
INTERNAL void reload_if_exists(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind)
{
	/* Hot-reload path: if a cache entry of this kind already exists for the
	 * tag, allocate a fresh entry (force_new) and queue a load into it, so
	 * users of the old entry keep valid data until the replacement finishes
	 * loading (removes the reload flicker from unload-then-reload). */
	struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
	struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT];
	struct sprite_scope_cache_ref *existing_ref = NULL;
	/* A shared lock suffices for the existence probe; cache_lookup takes a
	 * scope reference, so the entry stays pinned after the lock is dropped. */
	struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex);
	{
		existing_ref = cache_lookup(scope, hash, &bin_lock);
	}
	sys_mutex_unlock(&bin_lock);
	if (existing_ref) {
		logf_info("Sprite resource file \"%F\" has changed for sprite [%F].", FMT_STR(tag.path), FMT_HEX(hash.v));
		/* force_new = true: skip the scope/cache lookups and always allocate
		 * a brand-new entry rather than reusing the stale one found above. */
		struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind, true);
		push_load_task(scope_ref->ref, tag);
	}
}
INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name) INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name)
{ {
b32 exists = false; struct sprite_scope *scope = sprite_scope_begin();
struct sprite_tag tag = sprite_tag_from_path(name); struct sprite_tag tag = sprite_tag_from_path(name);
for (enum cache_entry_kind kind = 0; kind < NUM_CACHE_ENTRY_KINDS; ++kind) { for (enum cache_entry_kind kind = 0; kind < NUM_CACHE_ENTRY_KINDS; ++kind) {
struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind); reload_if_exists(scope, tag, kind);
u64 bin_index = hash.v % CACHE_BINS_COUNT;
struct cache_bin *bin = &G.cache.bins[bin_index];
struct sys_lock lock = sys_mutex_lock_s(&bin->mutex);
{
for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) {
if (entry->hash.v == hash.v) {
if (!exists) {
logf_info("Sprite resource file \"%F\" has changed.", FMT_STR(name));
exists = true;
}
}
}
}
sys_mutex_unlock(&lock);
} }
sprite_scope_end(scope);
} }
#endif #endif
@ -1213,7 +1258,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
} }
/* Scratch arena should only contain evict array at this point */ /* Scratch arena should only contain evict array at this point */
ASSERT(scratch.arena->pos == (sizeof(*evict_array) * evict_array_count)); ASSERT(((scratch.arena->base + scratch.arena->pos) - (sizeof(*evict_array) * evict_array_count)) == (u8 *)evict_array);
/* Sort evict nodes */ /* Sort evict nodes */
{ {

View File

@ -393,8 +393,8 @@ void sys_mutex_unlock(struct sys_lock *lock);
void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex); void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex);
void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex); void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex);
#else #else
# define sys_assert_locked_e(l, m) # define sys_assert_locked_e(l, m) (UNUSED)l
# define sys_assert_locked_e_or_s(l, m) # define sys_assert_locked_e_or_s(l, m) (UNUSED)l
#endif #endif
/* ========================== * /* ========================== *