rename sprite cache_node -> cache_entry

This commit is contained in:
jacob 2025-05-14 05:14:10 -05:00
parent 5b13d4cd02
commit dfe0034154

View File

@ -14,7 +14,7 @@
#include "rand.h"
/* The evictor will begin evicting once cache usage is > threshold.
* It will nodes until the budget has shrunk < target. */
* It will evict entries until the budget has shrunk < target. */
/* TODO: Increase these for release (testing low values) */
#define CACHE_MEMORY_BUDGET_THRESHOLD (MEGABYTE(8))
#define CACHE_MEMORY_BUDGET_TARGET (MEGABYTE(4))
@ -44,7 +44,7 @@ CT_ASSERT(CACHE_MEMORY_BUDGET_THRESHOLD >= CACHE_MEMORY_BUDGET_TARGET);
struct load_cmd {
struct sprite_scope *scope; /* Holds a reference to the sprite being loaded */
struct load_cmd *next_free;
struct cache_node *cache_node;
struct cache_entry *cache_entry;
struct sprite_tag tag;
u8 tag_path_buff[512];
};
@ -53,36 +53,36 @@ struct load_cmd {
* Cache structs
* ========================== */
enum cache_node_kind {
CACHE_NODE_KIND_TEXTURE,
CACHE_NODE_KIND_SHEET,
enum cache_entry_kind {
CACHE_ENTRY_KIND_TEXTURE,
CACHE_ENTRY_KIND_SHEET,
NUM_CACHE_NODE_KINDS
NUM_CACHE_ENTRY_KINDS
};
enum cache_node_state {
CACHE_NODE_STATE_NONE,
CACHE_NODE_STATE_QUEUED,
CACHE_NODE_STATE_WORKING,
CACHE_NODE_STATE_LOADED
enum cache_entry_state {
CACHE_ENTRY_STATE_NONE,
CACHE_ENTRY_STATE_QUEUED,
CACHE_ENTRY_STATE_WORKING,
CACHE_ENTRY_STATE_LOADED
};
struct cache_node_refcount {
i32 count; /* Number of scopes currently holding a reference to this node */
struct cache_entry_refcount {
i32 count; /* Number of scopes currently holding a reference to this entry */
i32 last_ref_cycle; /* Last time that refcount was modified */
};
CT_ASSERT(sizeof(struct cache_node_refcount) == 8); /* Must fit into 64 bit atomic */
CT_ASSERT(sizeof(struct cache_entry_refcount) == 8); /* Must fit into 64 bit atomic */
struct cache_node_hash {
struct cache_entry_hash {
u64 v;
};
/* See evictor thread comments for info on cache node lifetime */
struct cache_node {
enum cache_node_kind kind;
struct cache_node_hash hash;
/* See evictor thread comments for info on cache entry lifetime */
struct cache_entry {
enum cache_entry_kind kind;
struct cache_entry_hash hash;
struct atomic_i32 state;
struct atomic_u64 refcount_struct; /* Cast eval to `cache_node_refcount` */
struct atomic_u64 refcount_struct; /* Cast eval to `cache_entry_refcount` */
/* Allocated data */
u64 memory_usage;
@ -91,32 +91,32 @@ struct cache_node {
struct sprite_sheet *sheet;
/* Hash list */
struct cache_node *next_in_bin;
struct cache_node *prev_in_bin;
struct cache_entry *next_in_bin;
struct cache_entry *prev_in_bin;
/* Free list */
struct cache_node *next_free;
struct cache_entry *next_free;
#if RESOURCE_RELOADING
struct atomic_i32 out_of_date; /* Has the resource changed since this node was loaded */
struct atomic_i32 out_of_date; /* Has the resource changed since this entry was loaded */
#endif
};
struct cache_bin {
struct sys_mutex mutex;
struct cache_node *first;
struct cache_entry *first;
};
struct cache {
struct atomic_u64 memory_usage;
struct arena arena;
struct cache_bin *bins;
struct sys_mutex node_pool_mutex;
struct cache_node *node_pool_first_free;
struct sys_mutex entry_pool_mutex;
struct cache_entry *entry_pool_first_free;
};
struct sprite_scope_reference {
struct cache_node *cache_node;
struct cache_entry *cache_entry;
struct sprite_scope_reference *next_in_bin;
};
@ -236,7 +236,7 @@ struct sprite_startup_receipt sprite_startup(struct renderer_startup_receipt *re
}
arena_set_readonly(&G.perm_arena);
G.cache.node_pool_mutex = sys_mutex_alloc();
G.cache.entry_pool_mutex = sys_mutex_alloc();
G.cache.arena = arena_alloc(GIGABYTE(64));
G.cache.bins = arena_push_array_zero(&G.cache.arena, struct cache_bin, CACHE_BINS_COUNT);
for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) {
@ -295,28 +295,28 @@ b32 sprite_tag_eq(struct sprite_tag t1, struct sprite_tag t2)
return t1.hash == t2.hash;
}
INTERNAL struct cache_node_hash cache_node_hash_from_tag_hash(u64 tag_hash, enum cache_node_kind kind)
INTERNAL struct cache_entry_hash cache_entry_hash_from_tag_hash(u64 tag_hash, enum cache_entry_kind kind)
{
return (struct cache_node_hash) { .v = rand_u64_from_seed(tag_hash + kind) };
return (struct cache_entry_hash) { .v = rand_u64_from_seed(tag_hash + kind) };
}
/* ========================== *
* Load
* ========================== */
INTERNAL void cache_node_load_texture(struct cache_node *n, struct sprite_tag tag)
INTERNAL void cache_entry_load_texture(struct cache_entry *n, struct sprite_tag tag)
{
__prof;
struct temp_arena scratch = scratch_begin_no_conflict();
atomic_i32_eval_exchange(&n->state, CACHE_NODE_STATE_WORKING);
atomic_i32_eval_exchange(&n->state, CACHE_ENTRY_STATE_WORKING);
struct string path = tag.path;
logf_info("Loading sprite texture [%F] \"%F\"", FMT_HEX(n->hash.v), FMT_STR(path));
i64 start_ns = sys_time_ns();
ASSERT(string_ends_with(path, LIT(".ase")));
ASSERT(n->kind == CACHE_NODE_KIND_TEXTURE);
ASSERT(n->kind == CACHE_ENTRY_KIND_TEXTURE);
/* TODO: Replace arena allocs w/ buddy allocator */
/* TODO: Arena probably overkill. Just using it to store texture struct. */
@ -354,7 +354,7 @@ INTERNAL void cache_node_load_texture(struct cache_node *n, struct sprite_tag ta
FMT_FLOAT(elapsed),
FMT_UINT(n->memory_usage));
atomic_i32_eval_exchange(&n->state, CACHE_NODE_STATE_LOADED);
atomic_i32_eval_exchange(&n->state, CACHE_ENTRY_STATE_LOADED);
scratch_end(scratch);
}
@ -603,18 +603,18 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
return sheet;
}
INTERNAL void cache_node_load_sheet(struct cache_node *n, struct sprite_tag tag)
INTERNAL void cache_entry_load_sheet(struct cache_entry *n, struct sprite_tag tag)
{
__prof;
struct temp_arena scratch = scratch_begin_no_conflict();
atomic_i32_eval_exchange(&n->state, CACHE_NODE_STATE_WORKING);
atomic_i32_eval_exchange(&n->state, CACHE_ENTRY_STATE_WORKING);
struct string path = tag.path;
logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(n->hash.v), FMT_STR(path));
i64 start_ns = sys_time_ns();
ASSERT(n->kind == CACHE_NODE_KIND_SHEET);
ASSERT(n->kind == CACHE_ENTRY_KIND_SHEET);
/* TODO: Replace arena allocs w/ buddy allocator */
n->arena = arena_alloc(SHEET_ARENA_RESERVE);
@ -647,7 +647,7 @@ INTERNAL void cache_node_load_sheet(struct cache_node *n, struct sprite_tag tag)
FMT_UINT(n->memory_usage));
atomic_i32_eval_exchange(&n->state, CACHE_NODE_STATE_LOADED);
atomic_i32_eval_exchange(&n->state, CACHE_ENTRY_STATE_LOADED);
scratch_end(scratch);
}
@ -656,13 +656,13 @@ INTERNAL void cache_node_load_sheet(struct cache_node *n, struct sprite_tag tag)
* Scope
* ========================== */
INTERNAL void refcount_add(struct cache_node *n, i32 amount)
INTERNAL void refcount_add(struct cache_entry *n, i32 amount)
{
i32 evictor_cycle = atomic_i32_eval(&G.evictor_cycle);
struct atomic_u64 *refcount_atomic = &n->refcount_struct;
u64 old_refcount_uncast = atomic_u64_eval(refcount_atomic);
do {
struct cache_node_refcount new_refcount = *(struct cache_node_refcount *)&old_refcount_uncast;
struct cache_entry_refcount new_refcount = *(struct cache_entry_refcount *)&old_refcount_uncast;
new_refcount.count += amount;
new_refcount.last_ref_cycle = evictor_cycle;
u64 v = atomic_u64_eval_compare_exchange(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
@ -675,12 +675,12 @@ INTERNAL void refcount_add(struct cache_node *n, i32 amount)
}
/* Returns the slot at which the reference pointer should exist in the sprite scope.
* If the node is not already referenced by the scope then the returned pointed to slot will point to NULL. */
INTERNAL struct sprite_scope_reference **sprite_scope_reference_slot_from_node(struct sprite_scope *scope, struct cache_node *cache_node, u64 bin_index)
* If the entry is not already referenced by the scope then the returned pointed to slot will point to NULL. */
INTERNAL struct sprite_scope_reference **sprite_scope_reference_slot_from_entry(struct sprite_scope *scope, struct cache_entry *cache_entry, u64 bin_index)
{
struct sprite_scope_reference **ref_slot = &scope->reference_bins[bin_index];
while (*ref_slot) {
if ((*ref_slot)->cache_node == cache_node) {
if ((*ref_slot)->cache_entry == cache_entry) {
/* Found reference in scope */
break;
} else {
@ -721,11 +721,11 @@ struct sprite_scope *sprite_scope_begin(void)
void sprite_scope_end(struct sprite_scope *scope)
{
/* Dereference nodes */
/* Dereference entries */
u64 num_references = scope->num_references;
for (u64 i = 0; i < num_references; ++i) {
struct sprite_scope_reference *ref = &scope->reference_pool[i];
refcount_add(ref->cache_node, -1);
refcount_add(ref->cache_entry, -1);
}
/* Release scope */
@ -741,8 +741,8 @@ void sprite_scope_end(struct sprite_scope *scope)
* Cache interface
* ========================== */
/* `ref_slot` is result from `sprite_scope_reference_slot_from_node` */
INTERNAL void node_reference(struct cache_node *cache_node, struct sprite_scope *scope, struct sprite_scope_reference **ref_slot)
/* `ref_slot` is result from `sprite_scope_reference_slot_from_entry` */
INTERNAL void entry_reference(struct cache_entry *cache_entry, struct sprite_scope *scope, struct sprite_scope_reference **ref_slot)
{
if (scope->num_references >= MAX_SCOPE_REFERENCES) {
sys_panic(LIT("Max sprite scope references reached"));
@ -751,25 +751,25 @@ INTERNAL void node_reference(struct cache_node *cache_node, struct sprite_scope
ASSERT(*ref_slot == NULL); /* Ref slot should not already have a reference present */
/* Increment refcount */
refcount_add(cache_node, 1);
refcount_add(cache_entry, 1);
/* Grab reference from pool */
struct sprite_scope_reference *ref = &scope->reference_pool[scope->num_references++];
MEMZERO_STRUCT(ref);
ref->cache_node = cache_node;
ref->cache_entry = cache_entry;
*ref_slot = ref;
}
INTERNAL struct cache_node *node_lookup_touch_and_reference(struct sprite_scope *scope, struct sprite_tag tag, enum cache_node_kind kind)
INTERNAL struct cache_entry *entry_lookup_touch_and_reference(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind)
{
__prof;
struct cache_node *n = NULL;
struct cache_node *nonmatching = NULL;
struct cache_node **nonmatching_next = NULL;
struct cache_entry *entry = NULL;
struct cache_entry *nonmatching = NULL;
struct cache_entry **nonmatching_next = NULL;
struct cache_node_hash hash = cache_node_hash_from_tag_hash(tag.hash, kind);
struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
u64 bin_index = hash.v % CACHE_BINS_COUNT;
struct cache_bin *bin = &G.cache.bins[bin_index];
@ -778,26 +778,26 @@ INTERNAL struct cache_node *node_lookup_touch_and_reference(struct sprite_scope
{
struct sys_lock lock = sys_mutex_lock_s(&bin->mutex);
nonmatching_next = &bin->first;
n = *nonmatching_next;
while (n) {
entry = *nonmatching_next;
while (entry) {
b32 match = false;
if (n->hash.v == hash.v) {
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_node(scope, n, bin_index);
if (entry->hash.v == hash.v) {
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_entry(scope, entry, bin_index);
#if RESOURCE_RELOADING
if (*ref_slot) {
match = true;
} else {
if (atomic_i32_eval(&n->out_of_date)) {
/* If node is out of date and the scope doesn't already hold a reference to it, then ignore node */
if (atomic_i32_eval(&entry->out_of_date)) {
/* If entry is out of date and the scope doesn't already hold a reference to it, then ignore entry */
} else {
match = true;
node_reference(n, scope, ref_slot);
entry_reference(entry, scope, ref_slot);
}
}
#else
if (!(*ref_slot)) {
node_reference(n, scope, ref_slot);
entry_reference(entry, scope, ref_slot);
}
match = true;
#endif
@ -806,87 +806,87 @@ INTERNAL struct cache_node *node_lookup_touch_and_reference(struct sprite_scope
if (match) {
break;
} else {
nonmatching = n;
nonmatching = entry;
nonmatching_next = &nonmatching->next_in_bin;
n = *nonmatching_next;
entry = *nonmatching_next;
}
}
sys_mutex_unlock(&lock);
}
/* Allocate new node if necessary */
if (!n) {
__profscope(node_lookup_allocate);
/* Allocate new entry if necessary */
if (!entry) {
__profscope(entry_lookup_allocate);
struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
{
/* Alloc node */
/* Alloc entry */
{
struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.node_pool_mutex);
if (G.cache.node_pool_first_free) {
n = G.cache.node_pool_first_free;
G.cache.node_pool_first_free = n->next_free;
MEMZERO_STRUCT(n);
struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.entry_pool_mutex);
if (G.cache.entry_pool_first_free) {
entry = G.cache.entry_pool_first_free;
G.cache.entry_pool_first_free = entry->next_free;
MEMZERO_STRUCT(entry);
} else {
n = arena_push_zero(&G.cache.arena, struct cache_node);
entry = arena_push_zero(&G.cache.arena, struct cache_entry);
}
sys_mutex_unlock(&pool_lock);
}
/* Init node and add to bin */
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_node(scope, n, bin_index);
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_entry(scope, entry, bin_index);
if (!(*ref_slot)) {
node_reference(n, scope, ref_slot);
entry_reference(entry, scope, ref_slot);
}
*nonmatching_next = n;
*nonmatching_next = entry;
if (nonmatching) {
nonmatching->next_in_bin = n;
n->prev_in_bin = nonmatching;
nonmatching->next_in_bin = entry;
entry->prev_in_bin = nonmatching;
}
n->hash = cache_node_hash_from_tag_hash(tag.hash, kind);
n->kind = kind;
n->texture = G.nil_texture;
n->sheet = G.nil_sheet;
entry->hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
entry->kind = kind;
entry->texture = G.nil_texture;
entry->sheet = G.nil_sheet;
}
sys_mutex_unlock(&bin_lock);
}
return n;
return entry;
}
INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_tag tag, enum cache_node_kind kind, b32 await)
INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind, b32 await)
{
/* TODO: Replace switch statements */
void *res = NULL;
switch (kind) {
case CACHE_NODE_KIND_TEXTURE: { res = G.loading_texture; } break;
case CACHE_NODE_KIND_SHEET: { res = G.loading_sheet; } break;
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
case CACHE_ENTRY_KIND_TEXTURE: { res = G.loading_texture; } break;
case CACHE_ENTRY_KIND_SHEET: { res = G.loading_sheet; } break;
default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
}
struct cache_node *n = node_lookup_touch_and_reference(scope, tag, kind);
struct cache_entry *n = entry_lookup_touch_and_reference(scope, tag, kind);
enum cache_node_state state = atomic_i32_eval(&n->state);
if (state == CACHE_NODE_STATE_LOADED) {
enum cache_entry_state state = atomic_i32_eval(&n->state);
if (state == CACHE_ENTRY_STATE_LOADED) {
switch (kind) {
case CACHE_NODE_KIND_TEXTURE: { res = n->texture; } break;
case CACHE_NODE_KIND_SHEET: { res = n->sheet; } break;
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
case CACHE_ENTRY_KIND_TEXTURE: { res = n->texture; } break;
case CACHE_ENTRY_KIND_SHEET: { res = n->sheet; } break;
default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
}
} else if (state == CACHE_NODE_STATE_NONE) {
/* If node is new, load texture */
if (atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUED) == CACHE_NODE_STATE_NONE) {
} else if (state == CACHE_ENTRY_STATE_NONE) {
/* If entry is new, load texture */
if (atomic_i32_eval_compare_exchange(&n->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) {
/* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
if (await) {
switch (kind) {
case CACHE_NODE_KIND_TEXTURE: {
cache_node_load_texture(n, tag);
case CACHE_ENTRY_KIND_TEXTURE: {
cache_entry_load_texture(n, tag);
res = n->texture;
} break;
case CACHE_NODE_KIND_SHEET: {
cache_node_load_sheet(n, tag);
case CACHE_ENTRY_KIND_SHEET: {
cache_entry_load_sheet(n, tag);
res = n->sheet;
} break;
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
}
} else {
/* Allocate cmd */
@ -907,9 +907,9 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
/* Initialize cmd */
cmd->scope = sprite_scope_begin();
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_node(cmd->scope, n, bin_index);
node_reference(n, cmd->scope, ref_slot);
cmd->cache_node = n;
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_entry(cmd->scope, n, bin_index);
entry_reference(n, cmd->scope, ref_slot);
cmd->cache_entry = n;
cmd->tag = tag;
{
u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
@ -924,8 +924,8 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
}
/* Spinlock until result is ready */
if (await && state != CACHE_NODE_STATE_LOADED) {
while (atomic_i32_eval(&n->state) != CACHE_NODE_STATE_LOADED) {
if (await && state != CACHE_ENTRY_STATE_LOADED) {
while (atomic_i32_eval(&n->state) != CACHE_ENTRY_STATE_LOADED) {
ix_pause();
}
}
@ -940,13 +940,13 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
struct sprite_texture *sprite_texture_from_tag_await(struct sprite_scope *scope, struct sprite_tag tag)
{
__prof;
return (struct sprite_texture *)data_from_tag_internal(scope, tag, CACHE_NODE_KIND_TEXTURE, true);
return (struct sprite_texture *)data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_TEXTURE, true);
}
struct sprite_texture *sprite_texture_from_tag_async(struct sprite_scope *scope, struct sprite_tag tag)
{
__prof;
return (struct sprite_texture *)data_from_tag_internal(scope, tag, CACHE_NODE_KIND_TEXTURE, false);
return (struct sprite_texture *)data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_TEXTURE, false);
}
/* ========================== *
@ -956,13 +956,13 @@ struct sprite_texture *sprite_texture_from_tag_async(struct sprite_scope *scope,
struct sprite_sheet *sprite_sheet_from_tag_await(struct sprite_scope *scope, struct sprite_tag tag)
{
__prof;
return (struct sprite_sheet *)data_from_tag_internal(scope, tag, CACHE_NODE_KIND_SHEET, true);
return (struct sprite_sheet *)data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_SHEET, true);
}
struct sprite_sheet *sprite_sheet_from_tag_async(struct sprite_scope *scope, struct sprite_tag tag)
{
__prof;
return (struct sprite_sheet *)data_from_tag_internal(scope, tag, CACHE_NODE_KIND_SHEET, false);
return (struct sprite_sheet *)data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_SHEET, false);
}
struct sprite_sheet_frame sprite_sheet_get_frame(struct sprite_sheet *sheet, u32 index)
@ -1037,13 +1037,13 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
__prof;
struct load_cmd *cmd = (struct load_cmd *)arg;
struct cache_node *n = cmd->cache_node;
struct cache_entry *n = cmd->cache_entry;
switch (n->kind) {
case CACHE_NODE_KIND_TEXTURE: {
cache_node_load_texture(n, cmd->tag);
case CACHE_ENTRY_KIND_TEXTURE: {
cache_entry_load_texture(n, cmd->tag);
} break;
case CACHE_NODE_KIND_SHEET: {
cache_node_load_sheet(n, cmd->tag);
case CACHE_ENTRY_KIND_SHEET: {
cache_entry_load_sheet(n, cmd->tag);
} break;
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
}
@ -1067,13 +1067,13 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name)
{
struct sprite_tag tag = sprite_tag_from_path(name);
for (enum cache_node_kind kind = 0; kind < NUM_CACHE_NODE_KINDS; ++kind) {
struct cache_node_hash hash = cache_node_hash_from_tag_hash(tag.hash, kind);
for (enum cache_entry_kind kind = 0; kind < NUM_CACHE_ENTRY_KINDS; ++kind) {
struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
u64 bin_index = hash.v % CACHE_BINS_COUNT;
struct cache_bin *bin = &G.cache.bins[bin_index];
struct sys_lock lock = sys_mutex_lock_s(&bin->mutex);
{
for (struct cache_node *n = bin->first; n; n = n->next_in_bin) {
for (struct cache_entry *n = bin->first; n; n = n->next_in_bin) {
if (n->hash.v == hash.v) {
logf_info("Sprite resource file \"%F\" has changed and will be reloaded.", FMT_STR(name));
atomic_i32_eval_exchange(&n->out_of_date, 1);
@ -1092,7 +1092,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name)
struct evict_node {
i32 last_ref_cycle;
struct cache_node *cache_node;
struct cache_entry *cache_entry;
struct cache_bin *cache_bin;
struct evict_node *next_evicted;
@ -1140,10 +1140,10 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
struct cache_bin *bin = &G.cache.bins[i];
struct sys_lock bin_lock = sys_mutex_lock_s(&bin->mutex);
{
struct cache_node *n = bin->first;
struct cache_entry *n = bin->first;
while (n) {
u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct);
struct cache_node_refcount refcount = *(struct cache_node_refcount *)&refcount_uncast;
struct cache_entry_refcount refcount = *(struct cache_entry_refcount *)&refcount_uncast;
if (refcount.count <= 0) {
/* Add node to evict list */
#if RESOURCE_RELOADING
@ -1154,7 +1154,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
b32 is_old = cache_over_budget_threshold && ((cur_cycle - refcount.last_ref_cycle) > EVICTOR_GRACE_PERIOD_CYCLES);
if (is_old || is_out_of_date) {
struct evict_node *en = arena_push_zero(scratch.arena, struct evict_node);
en->cache_node = n;
en->cache_entry = n;
en->cache_bin = bin;
en->last_ref_cycle = refcount.last_ref_cycle * !is_out_of_date; /* If out of date then set last cycle to 0 */
++evict_array_count;
@ -1185,25 +1185,25 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
for (u64 i = 0; i < evict_array_count && !stop_evicting; ++i) {
struct evict_node *en = &evict_array[i];
struct cache_bin *bin = en->cache_bin;
struct cache_node *n = en->cache_node;
struct cache_entry *entry = en->cache_entry;
struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
i32 last_ref_cycle = en->last_ref_cycle;
b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET;
{
struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct);
struct cache_entry_refcount refcount = *(struct cache_entry_refcount *)atomic_u64_raw(&entry->refcount_struct);
if (refcount.count > 0 || (last_ref_cycle > 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
/* Cache entry has been referenced since scan, skip entry. */
} else if (cache_over_budget_target || last_ref_cycle == 0) {
/* Remove from cache bin */
if (n->prev_in_bin) {
n->prev_in_bin->next_in_bin = n->next_in_bin;
if (entry->prev_in_bin) {
entry->prev_in_bin->next_in_bin = entry->next_in_bin;
} else {
bin->first = n->next_in_bin;
bin->first = entry->next_in_bin;
}
if (n->next_in_bin) {
n->next_in_bin->prev_in_bin = n->prev_in_bin;
if (entry->next_in_bin) {
entry->next_in_bin->prev_in_bin = entry->prev_in_bin;
}
atomic_u64_eval_add_i64(&G.cache.memory_usage, -((i64)n->memory_usage));
atomic_u64_eval_add_i64(&G.cache.memory_usage, -((i64)entry->memory_usage));
/* Add to evicted list */
en->next_evicted = first_evicted;
@ -1222,8 +1222,8 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
{
__profscope(eviction_memory_release);
for (struct evict_node *en = first_evicted; en; en = en->next_evicted) {
struct cache_node *n = en->cache_node;
if (n->kind == CACHE_NODE_KIND_TEXTURE && n->texture->valid) {
struct cache_entry *n = en->cache_entry;
if (n->kind == CACHE_ENTRY_KIND_TEXTURE && n->texture->valid) {
renderer_texture_release(n->texture->texture);
}
arena_release(&n->arena);
@ -1233,11 +1233,11 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
/* Add evicted entries to free list */
{
__profscope(eviction_free_list_append);
struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.node_pool_mutex);
struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.entry_pool_mutex);
for (struct evict_node *en = first_evicted; en; en = en->next_evicted) {
struct cache_node *n = en->cache_node;
n->next_free = G.cache.node_pool_first_free;
G.cache.node_pool_first_free = n;
struct cache_entry *n = en->cache_entry;
n->next_free = G.cache.entry_pool_first_free;
G.cache.entry_pool_first_free = n;
}
sys_mutex_unlock(&pool_lock);
}