simpler sprite evict sorting logic

This commit is contained in:
jacob 2025-05-14 03:08:03 -05:00
parent c3d0869707
commit d0be0d8908
5 changed files with 57 additions and 70 deletions

View File

@ -76,7 +76,7 @@ INTERNAL void refresh_dbg_table(void)
* Check returned slot->hash != 0 for presence. */ * Check returned slot->hash != 0 for presence. */
INTERNAL struct asset *asset_cache_get_slot_locked(struct sys_lock *lock, struct string key, u64 hash) INTERNAL struct asset *asset_cache_get_slot_locked(struct sys_lock *lock, struct string key, u64 hash)
{ {
sys_assert_locked_s(lock, &G.lookup_mutex); sys_assert_locked_e_or_s(lock, &G.lookup_mutex);
(UNUSED)lock; (UNUSED)lock;
u64 index = hash % ARRAY_COUNT(G.lookup); u64 index = hash % ARRAY_COUNT(G.lookup);

View File

@ -3,6 +3,8 @@
#if PLATFORM_WINDOWS #if PLATFORM_WINDOWS
/* TODO: Remove "..._raw" functions */
FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); } FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
FORCE_INLINE i32 atomic_i32_inc_eval(struct atomic_i32 *x) { return (i32)_InterlockedIncrement((volatile long *)&x->_v); } FORCE_INLINE i32 atomic_i32_inc_eval(struct atomic_i32 *x) { return (i32)_InterlockedIncrement((volatile long *)&x->_v); }
FORCE_INLINE i32 atomic_i32_dec_eval(struct atomic_i32 *x) { return (i32)_InterlockedDecrement((volatile long *)&x->_v); } FORCE_INLINE i32 atomic_i32_dec_eval(struct atomic_i32 *x) { return (i32)_InterlockedDecrement((volatile long *)&x->_v); }

View File

@ -20,10 +20,10 @@
#define MAX_LOADER_THREADS 4 #define MAX_LOADER_THREADS 4
/* How long between evictor thread scans */ /* How long between evictor thread scans */
#define EVICTOR_CYCLE_INTERVAL_NS NS_FROM_SECONDS(0.5) #define EVICTOR_CYCLE_INTERVAL_NS NS_FROM_SECONDS(0.500)
/* Time a cache entry spends unused until it's considered evictable (rounded up to multiple of EVICTOR_CYCLE_INTERVAL) */ /* Cycles a cache entry spends unused until it's considered evictable */
#define EVICTOR_GRACE_PERIOD_NS NS_FROM_SECONDS(10) #define EVICTOR_GRACE_PERIOD_CYCLES (NS_FROM_SECONDS(10.000) / EVICTOR_CYCLE_INTERVAL_NS)
#define TCTX_ARENA_RESERVE MEGABYTE(64) #define TCTX_ARENA_RESERVE MEGABYTE(64)
@ -66,7 +66,7 @@ enum cache_node_state {
struct cache_node_refcount { struct cache_node_refcount {
i32 count; /* Number of scopes currently holding a reference to this node */ i32 count; /* Number of scopes currently holding a reference to this node */
u32 last_modified_cycle; /* Last time that refcount was modified */ i32 last_ref_cycle; /* Last time that refcount was modified */
}; };
CT_ASSERT(sizeof(struct cache_node_refcount) == 8); /* Must fit into 64 bit atomic */ CT_ASSERT(sizeof(struct cache_node_refcount) == 8); /* Must fit into 64 bit atomic */
@ -74,6 +74,7 @@ struct cache_node_hash {
u64 v; u64 v;
}; };
/* See evictor thread comments for info on cache node lifetime */
struct cache_node { struct cache_node {
enum cache_node_kind kind; enum cache_node_kind kind;
struct cache_node_hash hash; struct cache_node_hash hash;
@ -86,9 +87,6 @@ struct cache_node {
struct sprite_texture *texture; struct sprite_texture *texture;
struct sprite_sheet *sheet; struct sprite_sheet *sheet;
/* Work */
struct work_handle work;
/* Hash list */ /* Hash list */
struct cache_node *next_in_bin; struct cache_node *next_in_bin;
struct cache_node *prev_in_bin; struct cache_node *prev_in_bin;
@ -140,7 +138,7 @@ GLOBAL struct {
struct load_cmd *first_free_load_cmd; struct load_cmd *first_free_load_cmd;
/* Evictor thread */ /* Evictor thread */
struct atomic_u32 evictor_cycle; struct atomic_i32 evictor_cycle;
b32 evictor_shutdown; b32 evictor_shutdown;
struct sys_mutex evictor_mutex; struct sys_mutex evictor_mutex;
struct sys_condition_variable evictor_cv; struct sys_condition_variable evictor_cv;
@ -273,6 +271,7 @@ struct sprite_startup_receipt sprite_startup(struct renderer_startup_receipt *re
G.evictor_mutex = sys_mutex_alloc(); G.evictor_mutex = sys_mutex_alloc();
G.evictor_cv = sys_condition_variable_alloc(); G.evictor_cv = sys_condition_variable_alloc();
atomic_i32_eval_exchange(&G.evictor_cycle, 1);
G.evictor_thread = sys_thread_alloc(sprite_evictor_thread_entry_point, NULL, LIT("[P2] Sprite evictor")); G.evictor_thread = sys_thread_alloc(sprite_evictor_thread_entry_point, NULL, LIT("[P2] Sprite evictor"));
@ -328,14 +327,13 @@ INTERNAL struct cache_node_hash cache_node_hash_from_tag_hash(u64 tag_hash, enum
INTERNAL void node_refcount_add(struct cache_node *n, i32 amount) INTERNAL void node_refcount_add(struct cache_node *n, i32 amount)
{ {
u32 evictor_cycle = atomic_u32_eval(&G.evictor_cycle); i32 evictor_cycle = atomic_i32_eval(&G.evictor_cycle);
struct atomic_u64 *refcount_atomic = &n->refcount_struct; struct atomic_u64 *refcount_atomic = &n->refcount_struct;
u64 old_refcount_uncast = atomic_u64_eval(refcount_atomic); u64 old_refcount_uncast = atomic_u64_eval(refcount_atomic);
do { do {
struct cache_node_refcount new_refcount = *(struct cache_node_refcount *)&old_refcount_uncast; struct cache_node_refcount new_refcount = *(struct cache_node_refcount *)&old_refcount_uncast;
new_refcount.count += amount; new_refcount.count += amount;
new_refcount.last_modified_cycle = evictor_cycle; new_refcount.last_ref_cycle = evictor_cycle;
CT_ASSERT(sizeof(new_refcount) == sizeof(u64));
u64 v = atomic_u64_eval_compare_exchange(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount); u64 v = atomic_u64_eval_compare_exchange(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
if (v != old_refcount_uncast) { if (v != old_refcount_uncast) {
old_refcount_uncast = v; old_refcount_uncast = v;
@ -896,8 +894,9 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break; default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
} }
} else if (state == CACHE_NODE_STATE_NONE) { } else if (state == CACHE_NODE_STATE_NONE) {
/* If node is new, load texture */
if (atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUEING) == CACHE_NODE_STATE_NONE) { if (atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUEING) == CACHE_NODE_STATE_NONE) {
/* Node is new, load texture */ /* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
if (await) { if (await) {
switch (kind) { switch (kind) {
case CACHE_NODE_KIND_TEXTURE: { case CACHE_NODE_KIND_TEXTURE: {
@ -938,26 +937,18 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
sys_mutex_unlock(&lock); sys_mutex_unlock(&lock);
/* Push work */ /* Push work */
n->work = work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL); work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL);
atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_QUEUEING, CACHE_NODE_STATE_QUEUED); atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_QUEUEING, CACHE_NODE_STATE_QUEUED);
} }
} }
} }
/* TODO: Spinlock */ /* Spinlock until result is ready */
if (await && state != CACHE_NODE_STATE_LOADED) { if (await && state != CACHE_NODE_STATE_LOADED) {
while (true) { while (atomic_i32_eval(&n->state) != CACHE_NODE_STATE_LOADED) {
state = atomic_i32_eval(&n->state);
if (state == CACHE_NODE_STATE_LOADED) {
break;
} else if (state >= CACHE_NODE_STATE_QUEUED) {
work_wait(n->work);
} else {
/* Spinlock until work is ready to be waited on or sprite finishes loading */
ix_pause(); ix_pause();
} }
} }
}
return res; return res;
} }
@ -1120,8 +1111,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name)
* ========================== */ * ========================== */
struct evict_node { struct evict_node {
b32 force_evict; i32 last_ref_cycle;
struct cache_node_refcount refcount;
struct cache_node *cache_node; struct cache_node *cache_node;
struct cache_bin *cache_bin; struct cache_bin *cache_bin;
@ -1133,18 +1123,25 @@ INTERNAL SORT_COMPARE_FUNC_DEF(evict_sort, arg_a, arg_b, udata)
(UNUSED)udata; (UNUSED)udata;
struct evict_node *a = arg_a; struct evict_node *a = arg_a;
struct evict_node *b = arg_b; struct evict_node *b = arg_b;
i32 a_cycle = a->last_ref_cycle;
u64 refcount_uncast_a = atomic_u64_eval(&a->cache_node->refcount_struct); i32 b_cycle = b->last_ref_cycle;
u64 refcount_uncast_b = atomic_u64_eval(&b->cache_node->refcount_struct); return (b_cycle > a_cycle) - (a_cycle > b_cycle);
u32 cycle_a = ((struct cache_node_refcount *)&refcount_uncast_a)->last_modified_cycle;
u32 cycle_b = ((struct cache_node_refcount *)&refcount_uncast_b)->last_modified_cycle;
i32 res = (cycle_b > cycle_a) - (cycle_a > cycle_b);
res += ((a->force_evict > b->force_evict) - (a->force_evict < b->force_evict)) * 2;
return res;
} }
/* NOTE:
* A cache node is safe from eviction as long as:
* - Its bin mutex is locked (because eviction alters the bin's node list)
* - Any references are held to the node (its refcount > 0)
*
* Therefore to grab a reference to a node that may have no existing references,
* a lock on its bin mutex is required to prevent eviction while creating
* the reference.
*
* An attempt to evict a cache node will occur when:
* - Its refcount = 0, and either:
*   - The cache is over its memory budget and the node's last reference is longer ago than the grace period, or
*   - Resource reloading is enabled and the node is out of date due to a change to its original resource file
*/
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
{ {
(UNUSED)arg; (UNUSED)arg;
@ -1157,7 +1154,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
struct evict_node *evict_array = arena_dry_push(scratch.arena, struct evict_node); struct evict_node *evict_array = arena_dry_push(scratch.arena, struct evict_node);
if (!G.evictor_shutdown) { if (!G.evictor_shutdown) {
u32 cur_cycle = *atomic_u32_raw(&G.evictor_cycle); i32 cur_cycle = atomic_i32_eval(&G.evictor_cycle);
/* Scan for evictable nodes */ /* Scan for evictable nodes */
b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET; b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
@ -1169,39 +1166,25 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
{ {
struct cache_node *n = bin->first; struct cache_node *n = bin->first;
while (n) { while (n) {
b32 consider_for_eviction = false;
b32 force_evict = false;
u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct); u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct);
struct cache_node_refcount refcount = *(struct cache_node_refcount *)&refcount_uncast; struct cache_node_refcount refcount = *(struct cache_node_refcount *)&refcount_uncast;
if (refcount.count <= 0) { if (refcount.count <= 0) {
#if RESOURCE_RELOADING
/* Force evict out-of-date sprites */
if (atomic_i32_eval(&n->out_of_date)) {
consider_for_eviction = true;
force_evict = true;
}
#endif
/* Check usage time */
if (cache_over_budget) {
u32 last_used_cycle = refcount.last_modified_cycle;
i64 time_since_use_ns = ((i64)cur_cycle - (i64)last_used_cycle) * EVICTOR_CYCLE_INTERVAL_NS;
if (time_since_use_ns > EVICTOR_GRACE_PERIOD_NS) {
/* Cache is over budget and node hasn't been referenced in a while */
consider_for_eviction = true;
}
}
}
/* Add node to evict list */ /* Add node to evict list */
if (consider_for_eviction) { #if RESOURCE_RELOADING
b32 is_out_of_date = atomic_i32_eval(&n->out_of_date);
#else
b32 is_out_of_date = false;
#endif
b32 is_old = cache_over_budget && ((cur_cycle - refcount.last_ref_cycle) > EVICTOR_GRACE_PERIOD_CYCLES);
if (is_old || is_out_of_date) {
struct evict_node *en = arena_push_zero(scratch.arena, struct evict_node); struct evict_node *en = arena_push_zero(scratch.arena, struct evict_node);
en->cache_node = n; en->cache_node = n;
en->cache_bin = bin; en->cache_bin = bin;
en->refcount = refcount; en->last_ref_cycle = refcount.last_ref_cycle * !is_out_of_date; /* If out of date then set last cycle to 0 */
en->force_evict = force_evict;
++evict_array_count; ++evict_array_count;
} }
}
n = n->next_in_bin; n = n->next_in_bin;
} }
@ -1229,11 +1212,13 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
struct cache_bin *bin = en->cache_bin; struct cache_bin *bin = en->cache_bin;
struct cache_node *n = en->cache_node; struct cache_node *n = en->cache_node;
struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex); struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
i32 last_ref_cycle = en->last_ref_cycle;
cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
{ {
struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct); struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct);
if (refcount.count > 0 || ((refcount.last_modified_cycle != en->refcount.last_modified_cycle) && !en->force_evict)) { if (refcount.count > 0 || (last_ref_cycle > 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
/* Cache node has been referenced since scan, skip eviction. */ /* Cache node has been referenced since scan, skip node. */
} else if (atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET || en->force_evict) { } else if (cache_over_budget || last_ref_cycle == 0) {
/* Remove from cache bin */ /* Remove from cache bin */
if (n->prev_in_bin) { if (n->prev_in_bin) {
n->prev_in_bin->next_in_bin = n->next_in_bin; n->prev_in_bin->next_in_bin = n->next_in_bin;
@ -1283,7 +1268,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
} }
} }
} }
atomic_u32_inc_eval(&G.evictor_cycle); atomic_i32_inc_eval(&G.evictor_cycle);
scratch_end(scratch); scratch_end(scratch);
/* Wait */ /* Wait */

View File

@ -391,10 +391,10 @@ void sys_mutex_unlock(struct sys_lock *lock);
#if RTC #if RTC
void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex); void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex);
void sys_assert_locked_s(struct sys_lock *lock, struct sys_mutex *mutex); void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex);
#else #else
# define sys_assert_locked_e(l, m) # define sys_assert_locked_e(l, m)
# define sys_assert_locked_s(l, m) # define sys_assert_locked_e_or_s(l, m)
#endif #endif
/* ========================== * /* ========================== *

View File

@ -1652,7 +1652,7 @@ void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex)
ASSERT(lock->exclusive == true); ASSERT(lock->exclusive == true);
} }
void sys_assert_locked_s(struct sys_lock *lock, struct sys_mutex *mutex) void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex)
{ {
ASSERT(lock->mutex == mutex); ASSERT(lock->mutex == mutex);
} }