sprite cache ref struct

This commit is contained in:
jacob 2025-05-14 07:29:23 -05:00
parent dfe0034154
commit bf96dbb46a
2 changed files with 165 additions and 142 deletions

View File

@ -15,9 +15,8 @@
/* The evictor will begin evicting once cache usage is > threshold.
 * It will evict entries until the budget has shrunk < target. */
/* TODO: Increase these for release (testing low values) */
#define CACHE_MEMORY_BUDGET_THRESHOLD (MEGABYTE(8))
#define CACHE_MEMORY_BUDGET_TARGET (MEGABYTE(4))
#define CACHE_MEMORY_BUDGET_THRESHOLD (MEGABYTE(256))
#define CACHE_MEMORY_BUDGET_TARGET (MEGABYTE(128))
CT_ASSERT(CACHE_MEMORY_BUDGET_THRESHOLD >= CACHE_MEMORY_BUDGET_TARGET);
#define CACHE_BINS_COUNT 1024
@ -37,18 +36,6 @@ CT_ASSERT(CACHE_MEMORY_BUDGET_THRESHOLD >= CACHE_MEMORY_BUDGET_TARGET);
#define SHEET_SPAN_LOOKUP_TABLE_BIN_RATIO 2.0
#define SHEET_SLICE_LOOKUP_TABLE_BIN_RATIO 2.0
/* ========================== *
* Loader cmd structs
* ========================== */
struct load_cmd {
struct sprite_scope *scope; /* Holds a reference to the sprite being loaded */
struct load_cmd *next_free;
struct cache_entry *cache_entry;
struct sprite_tag tag;
u8 tag_path_buff[512];
};
/* ========================== *
* Cache structs
* ========================== */
@ -67,22 +54,21 @@ enum cache_entry_state {
CACHE_ENTRY_STATE_LOADED
};
struct cache_entry_refcount {
struct cache_refcount {
i32 count; /* Number of scopes currently holding a reference to this entry */
i32 last_ref_cycle; /* Last time that refcount was modified */
};
CT_ASSERT(sizeof(struct cache_entry_refcount) == 8); /* Must fit into 64 bit atomic */
CT_ASSERT(sizeof(struct cache_refcount) == 8); /* Must fit into 64 bit atomic */
struct cache_entry_hash {
u64 v;
};
/* See evictor thread comments for info on cache entry lifetime */
struct cache_entry {
enum cache_entry_kind kind;
struct cache_entry_hash hash;
struct atomic_i32 state;
struct atomic_u64 refcount_struct; /* Cast eval to `cache_entry_refcount` */
struct atomic_u64 refcount_struct; /* Cast eval to `cache_refcount` */
/* Allocated data */
u64 memory_usage;
@ -115,9 +101,26 @@ struct cache {
struct cache_entry *entry_pool_first_free;
};
struct sprite_scope_reference {
struct cache_entry *cache_entry;
struct sprite_scope_reference *next_in_bin;
/* Represents a reference that can be used to safely access a cache entry without it being evicted during the reference's lifetime */
struct cache_ref {
struct cache_entry *e;
};
/* A cache reference whose lifetime is bound to the scope it was retrieved from */
struct sprite_scope_cache_ref {
struct cache_ref ref;
struct sprite_scope_cache_ref *next_in_bin;
};
/* ========================== *
* Load cmd structs
* ========================== */
struct load_cmd {
struct load_cmd *next_free;
struct cache_ref ref;
struct sprite_tag tag;
u8 tag_path_buff[512];
};
/* ========================== *
@ -304,23 +307,24 @@ INTERNAL struct cache_entry_hash cache_entry_hash_from_tag_hash(u64 tag_hash, en
* Load
* ========================== */
INTERNAL void cache_entry_load_texture(struct cache_entry *n, struct sprite_tag tag)
INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag tag)
{
__prof;
struct temp_arena scratch = scratch_begin_no_conflict();
struct cache_entry *e = ref.e;
atomic_i32_eval_exchange(&n->state, CACHE_ENTRY_STATE_WORKING);
atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_WORKING);
struct string path = tag.path;
logf_info("Loading sprite texture [%F] \"%F\"", FMT_HEX(n->hash.v), FMT_STR(path));
logf_info("Loading sprite texture [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path));
i64 start_ns = sys_time_ns();
ASSERT(string_ends_with(path, LIT(".ase")));
ASSERT(n->kind == CACHE_ENTRY_KIND_TEXTURE);
ASSERT(e->kind == CACHE_ENTRY_KIND_TEXTURE);
/* TODO: Replace arena allocs w/ buddy allocator */
/* TODO: Arena probably overkill. Just using it to store texture struct. */
n->arena = arena_alloc(TEXTURE_ARENA_RESERVE);
e->arena = arena_alloc(TEXTURE_ARENA_RESERVE);
u64 memory_size = 0;
{
/* Decode */
@ -331,30 +335,30 @@ INTERNAL void cache_entry_load_texture(struct cache_entry *n, struct sprite_tag
resource_close(&texture_rs);
/* Initialize */
n->texture = arena_push(&n->arena, struct sprite_texture);
n->texture->width = decoded.image.width;
n->texture->height = decoded.image.height;
n->texture->texture = renderer_texture_alloc(RENDERER_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2I32(decoded.image.width, decoded.image.height), decoded.image.pixels);
n->texture->valid = true;
n->texture->loaded = true;
e->texture = arena_push(&e->arena, struct sprite_texture);
e->texture->width = decoded.image.width;
e->texture->height = decoded.image.height;
e->texture->texture = renderer_texture_alloc(RENDERER_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2I32(decoded.image.width, decoded.image.height), decoded.image.pixels);
e->texture->valid = true;
e->texture->loaded = true;
/* TODO: Query renderer for more accurate texture size in VRAM */
memory_size += (decoded.image.width * decoded.image.height) * sizeof(*decoded.image.pixels);
} else {
logf_error("Sprite [%F] \"%F\" not found", FMT_HEX(n->hash.v), FMT_STR(path));
logf_error("Sprite [%F] \"%F\" not found", FMT_HEX(e->hash.v), FMT_STR(path));
}
}
arena_set_readonly(&n->arena);
n->memory_usage = n->arena.committed + memory_size;
atomic_u64_eval_add_u64(&G.cache.memory_usage, n->memory_usage);
arena_set_readonly(&e->arena);
e->memory_usage = e->arena.committed + memory_size;
atomic_u64_eval_add_u64(&G.cache.memory_usage, e->memory_usage);
f64 elapsed = SECONDS_FROM_NS(sys_time_ns() - start_ns);
logf_info("Finished loading sprite texture [%F] \"%F\" in %F seconds (cache size: %F bytes).",
FMT_HEX(n->hash.v),
FMT_HEX(e->hash.v),
FMT_STR(path),
FMT_FLOAT(elapsed),
FMT_UINT(n->memory_usage));
FMT_UINT(e->memory_usage));
atomic_i32_eval_exchange(&n->state, CACHE_ENTRY_STATE_LOADED);
atomic_i32_eval_exchange(&e->state, CACHE_ENTRY_STATE_LOADED);
scratch_end(scratch);
}
@ -529,7 +533,7 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
++index;
}
/* Propogate original slices into next frames (and first slices into previous frames) */
/* Propagate original slices into next frames (and first slices into previous frames) */
for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next) {
struct sprite_sheet_slice_group *slice_group = temp_slice_group_node->final_slice_group;
@ -541,7 +545,7 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
struct sprite_sheet_slice *slice = &slice_group->frame_slices[(start * slice_group->per_frame_count) + index_in_frame];
/* Propogate earliest slice to all previous frames */
/* Propagate earliest slice to all previous frames */
if (start == node->earliest_frame && start > 0) {
for (u32 i = start; i-- > 0;) {
struct sprite_sheet_slice *target = &slice_group->frame_slices[(i * slice_group->per_frame_count) + index_in_frame];
@ -550,7 +554,7 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
}
}
/* Propogate slice to forward frames until original is found */
/* Propagate slice to forward frames until original is found */
for (u32 i = start + 1; i < ase.num_frames; ++i) {
struct sprite_sheet_slice *target = &slice_group->frame_slices[(i * slice_group->per_frame_count) + index_in_frame];
if (target->original) {
@ -564,7 +568,7 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
}
}
/* Calculate dirs */
/* Calculate direction vectors */
for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next) {
struct string ray_suffix = LIT(".ray");
@ -603,21 +607,21 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
return sheet;
}
INTERNAL void cache_entry_load_sheet(struct cache_entry *n, struct sprite_tag tag)
INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag)
{
__prof;
struct temp_arena scratch = scratch_begin_no_conflict();
atomic_i32_eval_exchange(&n->state, CACHE_ENTRY_STATE_WORKING);
atomic_i32_eval_exchange(&ref.e->state, CACHE_ENTRY_STATE_WORKING);
struct string path = tag.path;
logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(n->hash.v), FMT_STR(path));
logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(ref.e->hash.v), FMT_STR(path));
i64 start_ns = sys_time_ns();
ASSERT(n->kind == CACHE_ENTRY_KIND_SHEET);
ASSERT(ref.e->kind == CACHE_ENTRY_KIND_SHEET);
/* TODO: Replace arena allocs w/ buddy allocator */
n->arena = arena_alloc(SHEET_ARENA_RESERVE);
ref.e->arena = arena_alloc(SHEET_ARENA_RESERVE);
{
/* Decode */
struct ase_decode_sheet_result decoded = ZI;
@ -627,95 +631,143 @@ INTERNAL void cache_entry_load_sheet(struct cache_entry *n, struct sprite_tag ta
resource_close(&sheet_rs);
/* Initialize */
n->sheet = arena_push(&n->arena, struct sprite_sheet);
*n->sheet = init_sheet_from_ase_result(&n->arena, decoded);
n->sheet->loaded = true;
n->sheet->valid = true;
ref.e->sheet = arena_push(&ref.e->arena, struct sprite_sheet);
*ref.e->sheet = init_sheet_from_ase_result(&ref.e->arena, decoded);
ref.e->sheet->loaded = true;
ref.e->sheet->valid = true;
} else {
logf_error("Sprite \"%F\" not found", FMT_STR(path));
}
}
arena_set_readonly(&n->arena);
n->memory_usage = n->arena.committed;
atomic_u64_eval_add_u64(&G.cache.memory_usage, n->memory_usage);
arena_set_readonly(&ref.e->arena);
ref.e->memory_usage = ref.e->arena.committed;
atomic_u64_eval_add_u64(&G.cache.memory_usage, ref.e->memory_usage);
f64 elapsed = SECONDS_FROM_NS(sys_time_ns() - start_ns);
logf_info("Finished loading sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).",
FMT_HEX(n->hash.v),
FMT_HEX(ref.e->hash.v),
FMT_STR(path),
FMT_FLOAT(elapsed),
FMT_UINT(n->memory_usage));
FMT_UINT(ref.e->memory_usage));
atomic_i32_eval_exchange(&n->state, CACHE_ENTRY_STATE_LOADED);
atomic_i32_eval_exchange(&ref.e->state, CACHE_ENTRY_STATE_LOADED);
scratch_end(scratch);
}
/* ========================== *
* Scope
* Cache ref
* ========================== */
INTERNAL void refcount_add(struct cache_entry *n, i32 amount)
INTERNAL void refcount_add(struct cache_entry *e, i32 amount)
{
i32 evictor_cycle = atomic_i32_eval(&G.evictor_cycle);
struct atomic_u64 *refcount_atomic = &n->refcount_struct;
struct atomic_u64 *refcount_atomic = &e->refcount_struct;
u64 old_refcount_uncast = atomic_u64_eval(refcount_atomic);
do {
struct cache_entry_refcount new_refcount = *(struct cache_entry_refcount *)&old_refcount_uncast;
struct cache_refcount new_refcount = *(struct cache_refcount *)&old_refcount_uncast;
new_refcount.count += amount;
new_refcount.last_ref_cycle = evictor_cycle;
u64 v = atomic_u64_eval_compare_exchange(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
if (v != old_refcount_uncast) {
old_refcount_uncast = v;
} else {
ASSERT(new_refcount.count >= 0);
break;
}
} while (true);
}
/* Returns the slot at which the reference pointer should exist in the sprite scope.
* If the entry is not already referenced by the scope then the returned pointed to slot will point to NULL. */
INTERNAL struct sprite_scope_reference **sprite_scope_reference_slot_from_entry(struct sprite_scope *scope, struct cache_entry *cache_entry, u64 bin_index)
INTERNAL struct cache_ref cache_ref_alloc(struct cache_ref src_ref)
{
struct sprite_scope_reference **ref_slot = &scope->reference_bins[bin_index];
while (*ref_slot) {
if ((*ref_slot)->cache_entry == cache_entry) {
refcount_add(src_ref.e, 1);
return src_ref;
}
INTERNAL void cache_ref_release(struct cache_ref ref)
{
refcount_add(ref.e, -1);
}
/* ========================== *
* Scope
* ========================== */
INTERNAL struct sprite_scope_cache_ref *scope_get_ref_or_null(struct sprite_scope *scope, struct cache_entry *e, u64 bin_index, struct sys_lock *bin_lock)
{
sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex); /* Since entry may not have a reference, bin must be locked to ensure entry doesn't evict */
struct sprite_scope_cache_ref *node = scope->ref_node_bins[bin_index];
while (node) {
if (node->ref.e == e) {
/* Found reference in scope */
break;
} else {
ref_slot = &(*ref_slot)->next_in_bin;
node = node->next_in_bin;
}
}
return ref_slot;
return node;
}
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref(struct sprite_scope *scope, struct cache_entry *e, u64 bin_index, struct sys_lock *bin_lock)
{
sys_assert_locked_e_or_s(bin_lock, &G.cache.bins[bin_index].mutex); /* Since entry may not have a reference, bin must be locked to ensure entry doesn't evict */
struct sprite_scope_cache_ref **slot = &scope->ref_node_bins[bin_index];
while (*slot) {
if ((*slot)->ref.e == e) {
/* Found reference in scope */
break;
} else {
slot = &(*slot)->next_in_bin;
}
}
if (*slot == NULL) {
if (scope->num_references >= MAX_SCOPE_REFERENCES) {
sys_panic(LIT("Max sprite scope references reached"));
}
/* Increment refcount */
refcount_add(e, 1);
/* Grab node from pool */
struct sprite_scope_cache_ref *node = &scope->ref_node_pool[scope->num_references++];
MEMZERO_STRUCT(node);
node->ref.e = e;
*slot = node;
}
return *slot;
}
struct sprite_scope *sprite_scope_begin(void)
{
/* Alloc scope */
struct sprite_scope *res = NULL;
struct sprite_scope_reference **bins = NULL;
struct sprite_scope_reference *pool = NULL;
struct sprite_scope_cache_ref **bins = NULL;
struct sprite_scope_cache_ref *pool = NULL;
{
while (atomic_i32_eval_compare_exchange(&G.scopes_lock, 0, 1) != 0) ix_pause();
{
if (G.first_free_scope) {
res = G.first_free_scope;
G.first_free_scope = res->next_free;
bins = res->reference_bins;
pool = res->reference_pool;
bins = res->ref_node_bins;
pool = res->ref_node_pool;
} else {
res = arena_push(&G.scopes_arena, struct sprite_scope);
bins = arena_push_array(&G.scopes_arena, struct sprite_scope_reference *, CACHE_BINS_COUNT);
pool = arena_push_array(&G.scopes_arena, struct sprite_scope_reference, MAX_SCOPE_REFERENCES);
bins = arena_push_array(&G.scopes_arena, struct sprite_scope_cache_ref *, CACHE_BINS_COUNT);
pool = arena_push_array(&G.scopes_arena, struct sprite_scope_cache_ref, MAX_SCOPE_REFERENCES);
}
}
atomic_i32_eval_exchange(&G.scopes_lock, 0);
}
MEMZERO_STRUCT(res);
MEMZERO(bins, sizeof(*bins) * CACHE_BINS_COUNT);
res->reference_bins = bins;
res->reference_pool = pool;
res->ref_node_bins = bins;
res->ref_node_pool = pool;
return res;
}
@ -724,8 +776,8 @@ void sprite_scope_end(struct sprite_scope *scope)
/* Dereference entries */
u64 num_references = scope->num_references;
for (u64 i = 0; i < num_references; ++i) {
struct sprite_scope_reference *ref = &scope->reference_pool[i];
refcount_add(ref->cache_entry, -1);
struct sprite_scope_cache_ref *n = &scope->ref_node_pool[i];
refcount_add(n->ref.e, -1);
}
/* Release scope */
@ -741,30 +793,11 @@ void sprite_scope_end(struct sprite_scope *scope)
* Cache interface
* ========================== */
/* `ref_slot` is result from `sprite_scope_reference_slot_from_entry` */
INTERNAL void entry_reference(struct cache_entry *cache_entry, struct sprite_scope *scope, struct sprite_scope_reference **ref_slot)
{
if (scope->num_references >= MAX_SCOPE_REFERENCES) {
sys_panic(LIT("Max sprite scope references reached"));
}
ASSERT(*ref_slot == NULL); /* Ref slot should not already have a reference present */
/* Increment refcount */
refcount_add(cache_entry, 1);
/* Grab reference from pool */
struct sprite_scope_reference *ref = &scope->reference_pool[scope->num_references++];
MEMZERO_STRUCT(ref);
ref->cache_entry = cache_entry;
*ref_slot = ref;
}
INTERNAL struct cache_entry *entry_lookup_touch_and_reference(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind)
INTERNAL struct sprite_scope_cache_ref *cache_entry_lookup_touch(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind)
{
__prof;
struct sprite_scope_cache_ref *scope_ref = NULL;
struct cache_entry *entry = NULL;
struct cache_entry *nonmatching = NULL;
struct cache_entry **nonmatching_next = NULL;
@ -776,29 +809,26 @@ INTERNAL struct cache_entry *entry_lookup_touch_and_reference(struct sprite_scop
/* Lookup */
/* TODO: Spinlock */
{
struct sys_lock lock = sys_mutex_lock_s(&bin->mutex);
struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex);
nonmatching_next = &bin->first;
entry = *nonmatching_next;
while (entry) {
b32 match = false;
if (entry->hash.v == hash.v) {
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_entry(scope, entry, bin_index);
#if RESOURCE_RELOADING
if (*ref_slot) {
scope_ref = scope_get_ref_or_null(scope, entry, bin_index, &bin_lock);
if (scope_ref) {
match = true;
} else {
if (atomic_i32_eval(&entry->out_of_date)) {
/* If entry is out of date and the scope doesn't already hold a reference to it, then ignore entry */
} else {
match = true;
entry_reference(entry, scope, ref_slot);
scope_ref = scope_ensure_ref(scope, entry, bin_index, &bin_lock);
}
}
#else
if (!(*ref_slot)) {
entry_reference(entry, scope, ref_slot);
}
scope_ref = scope_ensure_ref(scope, entry, ref_node, &bin_lock);
match = true;
#endif
}
@ -811,7 +841,7 @@ INTERNAL struct cache_entry *entry_lookup_touch_and_reference(struct sprite_scop
entry = *nonmatching_next;
}
}
sys_mutex_unlock(&lock);
sys_mutex_unlock(&bin_lock);
}
/* Allocate new entry if necessary */
@ -833,10 +863,7 @@ INTERNAL struct cache_entry *entry_lookup_touch_and_reference(struct sprite_scop
}
/* Init node and add to bin */
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_entry(scope, entry, bin_index);
if (!(*ref_slot)) {
entry_reference(entry, scope, ref_slot);
}
scope_ref = scope_ensure_ref(scope, entry, bin_index, &bin_lock);
*nonmatching_next = entry;
if (nonmatching) {
nonmatching->next_in_bin = entry;
@ -850,7 +877,7 @@ INTERNAL struct cache_entry *entry_lookup_touch_and_reference(struct sprite_scop
sys_mutex_unlock(&bin_lock);
}
return entry;
return scope_ref;
}
INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind, b32 await)
@ -863,28 +890,29 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
}
struct cache_entry *n = entry_lookup_touch_and_reference(scope, tag, kind);
struct sprite_scope_cache_ref *scope_ref = cache_entry_lookup_touch(scope, tag, kind);
struct cache_ref ref = scope_ref->ref;
enum cache_entry_state state = atomic_i32_eval(&n->state);
enum cache_entry_state state = atomic_i32_eval(&ref.e->state);
if (state == CACHE_ENTRY_STATE_LOADED) {
switch (kind) {
case CACHE_ENTRY_KIND_TEXTURE: { res = n->texture; } break;
case CACHE_ENTRY_KIND_SHEET: { res = n->sheet; } break;
case CACHE_ENTRY_KIND_TEXTURE: { res = ref.e->texture; } break;
case CACHE_ENTRY_KIND_SHEET: { res = ref.e->sheet; } break;
default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
}
} else if (state == CACHE_ENTRY_STATE_NONE) {
/* If entry is new, load texture */
if (atomic_i32_eval_compare_exchange(&n->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) {
if (atomic_i32_eval_compare_exchange(&ref.e->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) {
/* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
if (await) {
switch (kind) {
case CACHE_ENTRY_KIND_TEXTURE: {
cache_entry_load_texture(n, tag);
res = n->texture;
cache_entry_load_texture(ref, tag);
res = ref.e->texture;
} break;
case CACHE_ENTRY_KIND_SHEET: {
cache_entry_load_sheet(n, tag);
res = n->sheet;
cache_entry_load_sheet(ref, tag);
res = ref.e->sheet;
} break;
default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
}
@ -903,13 +931,8 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
}
MEMZERO_STRUCT(cmd);
u64 bin_index = n->hash.v % CACHE_BINS_COUNT;
/* Initialize cmd */
cmd->scope = sprite_scope_begin();
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_entry(cmd->scope, n, bin_index);
entry_reference(n, cmd->scope, ref_slot);
cmd->cache_entry = n;
cmd->ref = cache_ref_alloc(ref);
cmd->tag = tag;
{
u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
@ -925,7 +948,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
/* Spinlock until result is ready */
if (await && state != CACHE_ENTRY_STATE_LOADED) {
while (atomic_i32_eval(&n->state) != CACHE_ENTRY_STATE_LOADED) {
while (atomic_i32_eval(&ref.e->state) != CACHE_ENTRY_STATE_LOADED) {
ix_pause();
}
}
@ -1036,14 +1059,14 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
{
__prof;
struct load_cmd *cmd = (struct load_cmd *)arg;
struct cache_ref ref = cmd->ref;
struct cache_entry *n = cmd->cache_entry;
switch (n->kind) {
switch (ref.e->kind) {
case CACHE_ENTRY_KIND_TEXTURE: {
cache_entry_load_texture(n, cmd->tag);
cache_entry_load_texture(ref, cmd->tag);
} break;
case CACHE_ENTRY_KIND_SHEET: {
cache_entry_load_sheet(n, cmd->tag);
cache_entry_load_sheet(ref, cmd->tag);
} break;
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
}
@ -1051,7 +1074,7 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
/* Free cmd */
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
{
sprite_scope_end(cmd->scope);
cache_ref_release(cmd->ref);
cmd->next_free = G.first_free_load_cmd;
G.first_free_load_cmd = cmd;
}
@ -1143,7 +1166,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
struct cache_entry *n = bin->first;
while (n) {
u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct);
struct cache_entry_refcount refcount = *(struct cache_entry_refcount *)&refcount_uncast;
struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
if (refcount.count <= 0) {
/* Add node to evict list */
#if RESOURCE_RELOADING
@ -1177,7 +1200,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
merge_sort(evict_array, evict_array_count, sizeof(*evict_array), evict_sort, NULL);
}
/* Remove evictable nodes from cache table until under budget */
/* Remove evictable nodes from cache until under budget */
struct evict_node *first_evicted = NULL;
{
__profscope(eviction_cache_removal);
@ -1190,7 +1213,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
i32 last_ref_cycle = en->last_ref_cycle;
b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET;
{
struct cache_entry_refcount refcount = *(struct cache_entry_refcount *)atomic_u64_raw(&entry->refcount_struct);
struct cache_refcount refcount = *(struct cache_refcount *)atomic_u64_raw(&entry->refcount_struct);
if (refcount.count > 0 || (last_ref_cycle > 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
/* Cache node has been referenced since scan, skip node. */
} else if (cache_over_budget_target || last_ref_cycle == 0) {

View File

@ -32,8 +32,8 @@ b32 sprite_tag_eq(struct sprite_tag t1, struct sprite_tag t2);
* ========================== */
struct sprite_scope {
struct sprite_scope_reference **reference_bins;
struct sprite_scope_reference *reference_pool;
struct sprite_scope_cache_ref **ref_node_bins;
struct sprite_scope_cache_ref *ref_node_pool;
u64 num_references;
struct sprite_scope *next_free;
};