sprite cache memory budget threshold & target

This commit is contained in:
jacob 2025-05-14 05:03:23 -05:00
parent c93b2829ce
commit 5b13d4cd02

View File

@ -13,7 +13,13 @@
#include "math.h"
#include "rand.h"
#define CACHE_MEMORY_BUDGET (MEGABYTE(256))
/* The evictor will begin evicting once cache usage is > threshold.
* It will evict nodes until cache usage has shrunk < target. */
/* TODO: Increase these for release (testing low values) */
#define CACHE_MEMORY_BUDGET_THRESHOLD (MEGABYTE(8))
#define CACHE_MEMORY_BUDGET_TARGET (MEGABYTE(4))
CT_ASSERT(CACHE_MEMORY_BUDGET_THRESHOLD >= CACHE_MEMORY_BUDGET_TARGET);
#define CACHE_BINS_COUNT 1024
#define MAX_SCOPE_REFERENCES 1024
@ -36,6 +42,7 @@
* ========================== */
struct load_cmd {
struct sprite_scope *scope; /* Holds a reference to the sprite being loaded */
struct load_cmd *next_free;
struct cache_node *cache_node;
struct sprite_tag tag;
@ -55,7 +62,6 @@ enum cache_node_kind {
enum cache_node_state {
CACHE_NODE_STATE_NONE,
CACHE_NODE_STATE_QUEUEING,
CACHE_NODE_STATE_QUEUED,
CACHE_NODE_STATE_WORKING,
CACHE_NODE_STATE_LOADED
@ -669,10 +675,10 @@ INTERNAL void refcount_add(struct cache_node *n, i32 amount)
}
/* Returns the slot at which the reference pointer should exist in the sprite scope.
* If the pointed to slot points to NULL, then the reference does not exist in the scope for the node. */
INTERNAL struct sprite_scope_reference **sprite_scope_reference_slot_from_node(struct sprite_scope *scope, struct cache_node *cache_node, u64 cache_bin_index)
* If the node is not already referenced by the scope then the returned pointed to slot will point to NULL. */
INTERNAL struct sprite_scope_reference **sprite_scope_reference_slot_from_node(struct sprite_scope *scope, struct cache_node *cache_node, u64 bin_index)
{
struct sprite_scope_reference **ref_slot = &scope->reference_bins[cache_bin_index];
struct sprite_scope_reference **ref_slot = &scope->reference_bins[bin_index];
while (*ref_slot) {
if ((*ref_slot)->cache_node == cache_node) {
/* Found reference in scope */
@ -684,30 +690,6 @@ INTERNAL struct sprite_scope_reference **sprite_scope_reference_slot_from_node(s
return ref_slot;
}
/* Insert a reference to `cache_node` into `scope`.
 * `ref_slot` is the result from `sprite_scope_reference_slot_from_node` and
 * must point to a NULL slot (i.e. the node is not yet referenced by the scope).
 * Increments the node's refcount; the reference is released when the scope ends.
 * Panics if the scope's fixed reference pool is exhausted. */
INTERNAL void sprite_scope_insert_reference(struct sprite_scope *scope, struct cache_node *cache_node, struct sprite_scope_reference **ref_slot)
{
    if (scope->num_references >= MAX_SCOPE_REFERENCES) {
        sys_panic(LIT("Max sprite scope references reached"));
    }
    ASSERT(*ref_slot == NULL); /* Ref slot should not already have a reference present */
    /* Scope holds a reference to the node */
    refcount_add(cache_node, 1);
    /* Grab reference from pool */
    struct sprite_scope_reference *ref = &scope->reference_pool[scope->num_references++];
    MEMZERO_STRUCT(ref);
    ref->cache_node = cache_node;
    /* NOTE: removed leftover DEBUGBREAKABLE scaffold that checked
     * `ref_slot == &ref->next_in_bin`; `ref` is freshly taken from the pool,
     * so the slot cannot alias its own next pointer. The successor function
     * `node_reference` drops this check as well. */
    *ref_slot = ref;
}
struct sprite_scope *sprite_scope_begin(void)
{
/* Alloc scope */
@ -759,7 +741,27 @@ void sprite_scope_end(struct sprite_scope *scope)
* Cache interface
* ========================== */
INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct sprite_tag tag, enum cache_node_kind kind)
/* Record that `scope` references `cache_node`.
 * `ref_slot` comes from `sprite_scope_reference_slot_from_node` and must be an
 * empty slot (the scope does not already reference the node). Bumps the node's
 * refcount and links a pool-allocated reference struct into the slot.
 * Panics when the scope's reference pool is full. */
INTERNAL void node_reference(struct cache_node *cache_node, struct sprite_scope *scope, struct sprite_scope_reference **ref_slot)
{
    ASSERT(*ref_slot == NULL); /* Slot must be empty — node not yet referenced by this scope */
    if (scope->num_references >= MAX_SCOPE_REFERENCES) {
        sys_panic(LIT("Max sprite scope references reached"));
    }
    /* The scope now holds a reference to the node */
    refcount_add(cache_node, 1);
    /* Take the next free entry from the scope's fixed pool and wire it in */
    struct sprite_scope_reference *new_ref = &scope->reference_pool[scope->num_references];
    scope->num_references += 1;
    MEMZERO_STRUCT(new_ref);
    new_ref->cache_node = cache_node;
    *ref_slot = new_ref;
}
INTERNAL struct cache_node *node_lookup_touch_and_reference(struct sprite_scope *scope, struct sprite_tag tag, enum cache_node_kind kind)
{
__prof;
@ -768,8 +770,8 @@ INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct
struct cache_node **nonmatching_next = NULL;
struct cache_node_hash hash = cache_node_hash_from_tag_hash(tag.hash, kind);
u64 cache_bin_index = hash.v % CACHE_BINS_COUNT;
struct cache_bin *bin = &G.cache.bins[cache_bin_index];
u64 bin_index = hash.v % CACHE_BINS_COUNT;
struct cache_bin *bin = &G.cache.bins[bin_index];
/* Lookup */
/* TODO: Spinlock */
@ -780,7 +782,7 @@ INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct
while (n) {
b32 match = false;
if (n->hash.v == hash.v) {
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_node(scope, n, cache_bin_index);
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_node(scope, n, bin_index);
#if RESOURCE_RELOADING
if (*ref_slot) {
@ -790,12 +792,12 @@ INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct
/* If node is out of date and the scope doesn't already hold a reference to it, then ignore node */
} else {
match = true;
sprite_scope_insert_reference(scope, n, ref_slot);
node_reference(n, scope, ref_slot);
}
}
#else
if (!(*ref_slot)) {
*ref_slot = sprite_scope_insert_reference(scope, n);
node_reference(n, scope, ref_slot);
}
match = true;
#endif
@ -831,9 +833,9 @@ INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct
}
/* Init node and add to bin */
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_node(scope, n, cache_bin_index);
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_node(scope, n, bin_index);
if (!(*ref_slot)) {
sprite_scope_insert_reference(scope, n, ref_slot);
node_reference(n, scope, ref_slot);
}
*nonmatching_next = n;
if (nonmatching) {
@ -861,7 +863,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
}
struct cache_node *n = node_lookup_touch(scope, tag, kind);
struct cache_node *n = node_lookup_touch_and_reference(scope, tag, kind);
enum cache_node_state state = atomic_i32_eval(&n->state);
if (state == CACHE_NODE_STATE_LOADED) {
@ -872,7 +874,7 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
}
} else if (state == CACHE_NODE_STATE_NONE) {
/* If node is new, load texture */
if (atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUEING) == CACHE_NODE_STATE_NONE) {
if (atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUED) == CACHE_NODE_STATE_NONE) {
/* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
if (await) {
switch (kind) {
@ -889,33 +891,34 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
} else {
/* Allocate cmd */
struct load_cmd *cmd = NULL;
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
if (G.first_free_load_cmd) {
cmd = G.first_free_load_cmd;
G.first_free_load_cmd = cmd->next_free;
MEMZERO_STRUCT(cmd);
} else {
cmd = arena_push_zero(&G.load_cmds_arena, struct load_cmd);
cmd = arena_push(&G.load_cmds_arena, struct load_cmd);
}
/* Initialize cmd */
cmd->cache_node = n;
cmd->tag = tag;
{
u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
cmd->tag.path.text = cmd->tag_path_buff;
MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
}
/* Cmd holds reference to node */
refcount_add(n, 1);
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(cmd);
u64 bin_index = n->hash.v % CACHE_BINS_COUNT;
/* Initialize cmd */
cmd->scope = sprite_scope_begin();
struct sprite_scope_reference **ref_slot = sprite_scope_reference_slot_from_node(cmd->scope, n, bin_index);
node_reference(n, cmd->scope, ref_slot);
cmd->cache_node = n;
cmd->tag = tag;
{
u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
cmd->tag.path.text = cmd->tag_path_buff;
MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
}
sys_mutex_unlock(&lock);
/* Push work */
work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL);
atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_QUEUEING, CACHE_NODE_STATE_QUEUED);
}
}
}
@ -1046,13 +1049,13 @@ INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
}
/* Free cmd */
refcount_add(n, -1);
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
{
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
sprite_scope_end(cmd->scope);
cmd->next_free = G.first_free_load_cmd;
G.first_free_load_cmd = cmd;
sys_mutex_unlock(&lock);
}
sys_mutex_unlock(&lock);
}
/* ========================== *
@ -1066,8 +1069,8 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name)
struct sprite_tag tag = sprite_tag_from_path(name);
for (enum cache_node_kind kind = 0; kind < NUM_CACHE_NODE_KINDS; ++kind) {
struct cache_node_hash hash = cache_node_hash_from_tag_hash(tag.hash, kind);
u64 cache_bin_index = hash.v % CACHE_BINS_COUNT;
struct cache_bin *bin = &G.cache.bins[cache_bin_index];
u64 bin_index = hash.v % CACHE_BINS_COUNT;
struct cache_bin *bin = &G.cache.bins[bin_index];
struct sys_lock lock = sys_mutex_lock_s(&bin->mutex);
{
for (struct cache_node *n = bin->first; n; n = n->next_in_bin) {
@ -1107,13 +1110,9 @@ INTERNAL SORT_COMPARE_FUNC_DEF(evict_sort, arg_a, arg_b, udata)
/* NOTE:
* A cache node is safe from eviction as long as:
* - Its bin mutex is locked (because eviction alters the bin's node list)
* - Its bin mutex is locked
* - Any references are held to the node (its refcount > 0)
*
* Therefore to grab a reference to a node that may have no existing references,
* a lock on its bin mutex is required to prevent eviction while creating
* the reference.
*
* An attempt to evict a cache node will occur when:
* - Its refcount = 0 and
* - The cache is over its memory budget and the node's last reference is longer ago than the grace period
@ -1134,8 +1133,8 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
i32 cur_cycle = atomic_i32_eval(&G.evictor_cycle);
/* Scan for evictable nodes */
b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
if (cache_over_budget || RESOURCE_RELOADING) {
b32 cache_over_budget_threshold = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_THRESHOLD;
if (cache_over_budget_threshold || RESOURCE_RELOADING) {
__profscope(eviction_scan);
for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) {
struct cache_bin *bin = &G.cache.bins[i];
@ -1152,7 +1151,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
#else
b32 is_out_of_date = false;
#endif
b32 is_old = cache_over_budget && ((cur_cycle - refcount.last_ref_cycle) > EVICTOR_GRACE_PERIOD_CYCLES);
b32 is_old = cache_over_budget_threshold && ((cur_cycle - refcount.last_ref_cycle) > EVICTOR_GRACE_PERIOD_CYCLES);
if (is_old || is_out_of_date) {
struct evict_node *en = arena_push_zero(scratch.arena, struct evict_node);
en->cache_node = n;
@ -1189,12 +1188,12 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
struct cache_node *n = en->cache_node;
struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
i32 last_ref_cycle = en->last_ref_cycle;
cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
b32 cache_over_budget_target = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET_TARGET;
{
struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct);
if (refcount.count > 0 || (last_ref_cycle > 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
/* Cache node has been referenced since scan, skip node. */
} else if (cache_over_budget || last_ref_cycle == 0) {
} else if (cache_over_budget_target || last_ref_cycle == 0) {
/* Remove from cache bin */
if (n->prev_in_bin) {
n->prev_in_bin->next_in_bin = n->next_in_bin;