simpler sprite evict sorting logic

This commit is contained in:
jacob 2025-05-14 03:08:03 -05:00
parent c3d0869707
commit d0be0d8908
5 changed files with 57 additions and 70 deletions

View File

@ -76,7 +76,7 @@ INTERNAL void refresh_dbg_table(void)
* Check returned slot->hash != 0 for presence. */
INTERNAL struct asset *asset_cache_get_slot_locked(struct sys_lock *lock, struct string key, u64 hash)
{
sys_assert_locked_s(lock, &G.lookup_mutex);
sys_assert_locked_e_or_s(lock, &G.lookup_mutex);
(UNUSED)lock;
u64 index = hash % ARRAY_COUNT(G.lookup);

View File

@ -3,6 +3,8 @@
#if PLATFORM_WINDOWS
/* TODO: Remove "..._raw" functions */
/* Atomic read: CAS with identical comparand/exchange (0,0) leaves the value untouched but returns it with full-barrier semantics. */
FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
/* Atomic increment; returns the resulting (incremented) value. */
FORCE_INLINE i32 atomic_i32_inc_eval(struct atomic_i32 *x) { return (i32)_InterlockedIncrement((volatile long *)&x->_v); }
/* Atomic decrement; returns the resulting (decremented) value. */
FORCE_INLINE i32 atomic_i32_dec_eval(struct atomic_i32 *x) { return (i32)_InterlockedDecrement((volatile long *)&x->_v); }

View File

@ -20,10 +20,10 @@
#define MAX_LOADER_THREADS 4
/* How long between evictor thread scans */
#define EVICTOR_CYCLE_INTERVAL_NS NS_FROM_SECONDS(0.5)
#define EVICTOR_CYCLE_INTERVAL_NS NS_FROM_SECONDS(0.500)
/* Time a cache entry spends unused until it's considered evictable (rounded up to multiple of EVICTOR_CYCLE_INTERVAL) */
#define EVICTOR_GRACE_PERIOD_NS NS_FROM_SECONDS(10)
/* Cycles a cache entry spends unused until it's considered evictable */
#define EVICTOR_GRACE_PERIOD_CYCLES (NS_FROM_SECONDS(10.000) / EVICTOR_CYCLE_INTERVAL_NS)
#define TCTX_ARENA_RESERVE MEGABYTE(64)
@ -66,7 +66,7 @@ enum cache_node_state {
struct cache_node_refcount {
i32 count; /* Number of scopes currently holding a reference to this node */
u32 last_modified_cycle; /* Last time that refcount was modified */
i32 last_ref_cycle; /* Last time that refcount was modified */
};
CT_ASSERT(sizeof(struct cache_node_refcount) == 8); /* Must fit into 64 bit atomic */
@ -74,6 +74,7 @@ struct cache_node_hash {
u64 v;
};
/* See evictor thread comments for info on cache node lifetime */
struct cache_node {
enum cache_node_kind kind;
struct cache_node_hash hash;
@ -86,9 +87,6 @@ struct cache_node {
struct sprite_texture *texture;
struct sprite_sheet *sheet;
/* Work */
struct work_handle work;
/* Hash list */
struct cache_node *next_in_bin;
struct cache_node *prev_in_bin;
@ -140,7 +138,7 @@ GLOBAL struct {
struct load_cmd *first_free_load_cmd;
/* Evictor thread */
struct atomic_u32 evictor_cycle;
struct atomic_i32 evictor_cycle;
b32 evictor_shutdown;
struct sys_mutex evictor_mutex;
struct sys_condition_variable evictor_cv;
@ -273,6 +271,7 @@ struct sprite_startup_receipt sprite_startup(struct renderer_startup_receipt *re
G.evictor_mutex = sys_mutex_alloc();
G.evictor_cv = sys_condition_variable_alloc();
atomic_i32_eval_exchange(&G.evictor_cycle, 1);
G.evictor_thread = sys_thread_alloc(sprite_evictor_thread_entry_point, NULL, LIT("[P2] Sprite evictor"));
@ -328,14 +327,13 @@ INTERNAL struct cache_node_hash cache_node_hash_from_tag_hash(u64 tag_hash, enum
INTERNAL void node_refcount_add(struct cache_node *n, i32 amount)
{
u32 evictor_cycle = atomic_u32_eval(&G.evictor_cycle);
i32 evictor_cycle = atomic_i32_eval(&G.evictor_cycle);
struct atomic_u64 *refcount_atomic = &n->refcount_struct;
u64 old_refcount_uncast = atomic_u64_eval(refcount_atomic);
do {
struct cache_node_refcount new_refcount = *(struct cache_node_refcount *)&old_refcount_uncast;
new_refcount.count += amount;
new_refcount.last_modified_cycle = evictor_cycle;
CT_ASSERT(sizeof(new_refcount) == sizeof(u64));
new_refcount.last_ref_cycle = evictor_cycle;
u64 v = atomic_u64_eval_compare_exchange(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
if (v != old_refcount_uncast) {
old_refcount_uncast = v;
@ -896,8 +894,9 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
}
} else if (state == CACHE_NODE_STATE_NONE) {
/* If node is new, load texture */
if (atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUEING) == CACHE_NODE_STATE_NONE) {
/* Node is new, load texture */
/* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
if (await) {
switch (kind) {
case CACHE_NODE_KIND_TEXTURE: {
@ -938,26 +937,18 @@ INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_
sys_mutex_unlock(&lock);
/* Push work */
n->work = work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL);
work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL);
atomic_i32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_QUEUEING, CACHE_NODE_STATE_QUEUED);
}
}
}
/* TODO: Spinlock */
/* Spinlock until result is ready */
if (await && state != CACHE_NODE_STATE_LOADED) {
while (true) {
state = atomic_i32_eval(&n->state);
if (state == CACHE_NODE_STATE_LOADED) {
break;
} else if (state >= CACHE_NODE_STATE_QUEUED) {
work_wait(n->work);
} else {
/* Spinlock until work is ready to be waited on or sprite finishes loading */
while (atomic_i32_eval(&n->state) != CACHE_NODE_STATE_LOADED) {
ix_pause();
}
}
}
return res;
}
@ -1120,8 +1111,7 @@ INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name)
* ========================== */
struct evict_node {
b32 force_evict;
struct cache_node_refcount refcount;
i32 last_ref_cycle;
struct cache_node *cache_node;
struct cache_bin *cache_bin;
@ -1133,18 +1123,25 @@ INTERNAL SORT_COMPARE_FUNC_DEF(evict_sort, arg_a, arg_b, udata)
(UNUSED)udata;
struct evict_node *a = arg_a;
struct evict_node *b = arg_b;
u64 refcount_uncast_a = atomic_u64_eval(&a->cache_node->refcount_struct);
u64 refcount_uncast_b = atomic_u64_eval(&b->cache_node->refcount_struct);
u32 cycle_a = ((struct cache_node_refcount *)&refcount_uncast_a)->last_modified_cycle;
u32 cycle_b = ((struct cache_node_refcount *)&refcount_uncast_b)->last_modified_cycle;
i32 res = (cycle_b > cycle_a) - (cycle_a > cycle_b);
res += ((a->force_evict > b->force_evict) - (a->force_evict < b->force_evict)) * 2;
return res;
i32 a_cycle = a->last_ref_cycle;
i32 b_cycle = b->last_ref_cycle;
return (b_cycle > a_cycle) - (a_cycle > b_cycle);
}
/* NOTE:
* A cache node is safe from eviction as long as:
* - Its bin mutex is locked (because eviction alters the bin's node list)
* - Any references are held to the node (its refcount > 0)
*
* Therefore to grab a reference to a node that may have no existing references,
* a lock on its bin mutex is required to prevent eviction while creating
* the reference.
*
* An attempt to evict a cache node will occur when:
- Its refcount = 0, and at least one of:
* - The cache is over its memory budget and the node's last reference is longer ago than the grace period
* - Resource reloading is enabled and the node is out of date due to a change to its original resource file
*/
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
{
(UNUSED)arg;
@ -1157,7 +1154,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
struct evict_node *evict_array = arena_dry_push(scratch.arena, struct evict_node);
if (!G.evictor_shutdown) {
u32 cur_cycle = *atomic_u32_raw(&G.evictor_cycle);
i32 cur_cycle = atomic_i32_eval(&G.evictor_cycle);
/* Scan for evictable nodes */
b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
@ -1169,39 +1166,25 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
{
struct cache_node *n = bin->first;
while (n) {
b32 consider_for_eviction = false;
b32 force_evict = false;
u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct);
struct cache_node_refcount refcount = *(struct cache_node_refcount *)&refcount_uncast;
if (refcount.count <= 0) {
#if RESOURCE_RELOADING
/* Force evict out-of-date sprites */
if (atomic_i32_eval(&n->out_of_date)) {
consider_for_eviction = true;
force_evict = true;
}
#endif
/* Check usage time */
if (cache_over_budget) {
u32 last_used_cycle = refcount.last_modified_cycle;
i64 time_since_use_ns = ((i64)cur_cycle - (i64)last_used_cycle) * EVICTOR_CYCLE_INTERVAL_NS;
if (time_since_use_ns > EVICTOR_GRACE_PERIOD_NS) {
/* Cache is over budget and node hasn't been referenced in a while */
consider_for_eviction = true;
}
}
}
/* Add node to evict list */
if (consider_for_eviction) {
#if RESOURCE_RELOADING
b32 is_out_of_date = atomic_i32_eval(&n->out_of_date);
#else
b32 is_out_of_date = false;
#endif
b32 is_old = cache_over_budget && ((cur_cycle - refcount.last_ref_cycle) > EVICTOR_GRACE_PERIOD_CYCLES);
if (is_old || is_out_of_date) {
struct evict_node *en = arena_push_zero(scratch.arena, struct evict_node);
en->cache_node = n;
en->cache_bin = bin;
en->refcount = refcount;
en->force_evict = force_evict;
en->last_ref_cycle = refcount.last_ref_cycle * !is_out_of_date; /* If out of date then set last cycle to 0 */
++evict_array_count;
}
}
n = n->next_in_bin;
}
@ -1229,11 +1212,13 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
struct cache_bin *bin = en->cache_bin;
struct cache_node *n = en->cache_node;
struct sys_lock bin_lock = sys_mutex_lock_e(&bin->mutex);
i32 last_ref_cycle = en->last_ref_cycle;
cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
{
struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct);
if (refcount.count > 0 || ((refcount.last_modified_cycle != en->refcount.last_modified_cycle) && !en->force_evict)) {
/* Cache node has been referenced since scan, skip eviction. */
} else if (atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET || en->force_evict) {
if (refcount.count > 0 || (last_ref_cycle > 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
/* Cache node has been referenced since scan, skip node. */
} else if (cache_over_budget || last_ref_cycle == 0) {
/* Remove from cache bin */
if (n->prev_in_bin) {
n->prev_in_bin->next_in_bin = n->next_in_bin;
@ -1283,7 +1268,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
}
}
}
atomic_u32_inc_eval(&G.evictor_cycle);
atomic_i32_inc_eval(&G.evictor_cycle);
scratch_end(scratch);
/* Wait */

View File

@ -391,10 +391,10 @@ void sys_mutex_unlock(struct sys_lock *lock);
#if RTC
void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex);
void sys_assert_locked_s(struct sys_lock *lock, struct sys_mutex *mutex);
void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex);
#else
# define sys_assert_locked_e(l, m)
# define sys_assert_locked_s(l, m)
# define sys_assert_locked_e_or_s(l, m)
#endif
/* ========================== *

View File

@ -1652,7 +1652,7 @@ void sys_assert_locked_e(struct sys_lock *lock, struct sys_mutex *mutex)
ASSERT(lock->exclusive == true);
}
void sys_assert_locked_s(struct sys_lock *lock, struct sys_mutex *mutex)
/* Asserts that `lock` is a lock on `mutex`, held in either exclusive or
 * shared mode — only mutex identity is checked (contrast sys_assert_locked_e,
 * which additionally asserts lock->exclusive). */
void sys_assert_locked_e_or_s(struct sys_lock *lock, struct sys_mutex *mutex)
{
ASSERT(lock->mutex == mutex);
}