/* power_play/src/sheet.c */

#include "sheet.h"
#include "arena.h"
#include "log.h"
#include "sys.h"
#include "scratch.h"
#include "resource.h"
#include "ase.h"
#include "util.h"
#include "work.h"
#include "atomic.h"
#include "thread_local.h"
#include "app.h"
#define SHEET_ARENA_RESERVE MEGABYTE(64)
#define SHEET_LOOKUP_TABLE_BUCKET_RATIO 2.0
#define TCTX_ARENA_RESERVE MEGABYTE(64)
#define CACHE_BUCKETS_COUNT 1024
#define MAX_LOADER_THREADS 4
/* How long between evictor thread scans */
#define EVICTOR_CYCLE_INTERVAL (RESOURCE_RELOADING ? 0.100 : 0.500)
/* Time a cache entry spends unused until it's considered evictable (rounded up to a multiple of EVICTOR_CYCLE_INTERVAL) */
#define EVICTOR_GRACE_PERIOD 10.000
/* ========================== *
* Loader cmd structs
* ========================== */
/* A queued request for a loader thread to load one sheet into a cache node.
 * Allocated from G.loader_cmd_arena and recycled via G.first_free_loader_cmd. */
struct loader_cmd {
    struct loader_cmd *next;       /* Next cmd in the pending work queue */
    struct loader_cmd *next_free;  /* Next cmd in the free list (when recycled) */
    struct cache_node *cache_node; /* Destination node; the cmd holds a refcount on it while queued */
    struct sheet_tag tag;          /* Tag to load; tag.path is repointed into tag_path_buff below */
    u8 tag_path_buff[4096];        /* Owned copy of the path bytes (the caller's string may not outlive the cmd) */
};
/* ========================== *
* Cache structs
* ========================== */
/* Lifecycle of a cache node's sheet payload; stored in cache_node.state (atomic). */
enum cache_node_state {
    CACHE_NODE_STATE_NONE,    /* Freshly allocated; no load claimed yet */
    CACHE_NODE_STATE_QUEUED,  /* A load has been claimed (NONE -> QUEUED compare-exchange) */
    CACHE_NODE_STATE_WORKING, /* sheet_load is currently decoding */
    CACHE_NODE_STATE_LOADED   /* node->sheet is valid and readable */
};
/* Packed refcount + last-touch cycle, read and written as a single 64-bit
 * atomic so both fields stay mutually consistent (see node_refcount_add). */
struct cache_node_refcount {
    i32 count; /* Number of scopes currently holding a reference to this sheet */
    u32 last_modified_cycle; /* Last time that refcount was modified (evictor cycle counter) */
};
CT_ASSERT(sizeof(struct cache_node_refcount) == 8); /* Must fit into 64 bit atomic */
/* One cached sheet. Nodes are pushed from G.cache.arena, linked into a hash
 * bucket while resident, and recycled through the node pool after eviction. */
struct cache_node {
    u128 hash;                         /* Full tag hash (bucket = hash % CACHE_BUCKETS_COUNT) */
    struct atomic_u32 state;           /* enum cache_node_state */
    struct atomic_u64 refcount_struct; /* Cast eval to `cache_node_refcount` */
    /* Allocated data */
    u64 memory_usage;   /* arena.committed after load; accounted into G.cache.memory_usage */
    struct arena arena; /* Backing storage for `sheet`; set read-only after load */
    struct sheet *sheet; /* Decoded sheet, or &g_sheet_nil when the resource was missing */
    /* Hash list (doubly linked within one cache_bucket) */
    struct cache_node *next_hash;
    struct cache_node *prev_hash;
    /* Free list (node pool; guarded by G.cache.node_pool_mutex) */
    struct cache_node *next_free;
#if RESOURCE_RELOADING
    struct sys_datetime initial_resource_file_modified_time; /* File mtime at load; evictor compares for hot reload */
    u64 tag_path_len;
    u8 tag_path[4096]; /* Copy of the tag path so the evictor can re-stat the file */
#endif
};
/* One hash bucket: an rw-lock protecting an intrusive list of cache nodes
 * (shared for lookups/scans, exclusive for inserts and evictions). */
struct cache_bucket {
    struct sys_rw_mutex rw_mutex;
    struct cache_node *first;
};
/* Global sheet cache: bucketed hash table plus a recycled node pool. */
struct cache {
    struct atomic_u64 memory_usage; /* Sum of resident nodes' memory_usage */
    struct arena arena;             /* Backing storage for buckets and nodes */
    struct cache_bucket *buckets;   /* CACHE_BUCKETS_COUNT buckets */
    struct sys_mutex node_pool_mutex; /* Guards node_pool_first_free */
    struct cache_node *node_pool_first_free;
};
/* Records that a scope holds one refcount on a cache node. Bucketed inside
 * the scope by the node's cache bucket index (see scope_ensure_reference). */
struct sheet_scope_reference {
    struct cache_node *cache_node;
    struct sheet_scope_reference *next_hash; /* Next reference in the same scope bucket */
    struct sheet_scope_reference *next_free; /* Thread-local free list link */
};
/* ========================== *
* Global state
* ========================== */
/* All module-global state for the sheet system. */
GLOBAL struct {
    /* Cache */
    struct cache cache;
    /* Loader threads: cmd queue protected by loaders_mutex + loaders_cv */
    b32 loaders_shutdown; /* Set under loaders_mutex at app exit */
    struct sys_mutex loaders_mutex;
    struct sys_condition_variable loaders_cv;
    struct arena loader_cmd_arena;
    struct loader_cmd *first_free_loader_cmd;
    struct loader_cmd *first_loader_cmd; /* Queue head */
    struct loader_cmd *last_loader_cmd;  /* Queue tail */
    u64 loader_threads_count;
    struct sys_thread loader_threads[MAX_LOADER_THREADS];
    /* Evictor thread */
    struct atomic_u32 evictor_cycle; /* Monotonic scan counter; stamps refcount touches */
    b32 evictor_shutdown;
    struct sys_mutex evictor_mutex;
    struct sys_condition_variable evictor_cv;
    struct sys_thread evictor_thread;
} G = { 0 }, DEBUG_ALIAS(G, G_sheet);
/* Sentinel returned for missing resources (all-zero sheet). */
GLOBAL READONLY struct sheet g_sheet_nil = { 0 };
/* Sentinel returned while an async load is still in flight. */
GLOBAL READONLY struct sheet g_sheet_loading = {
    .loading = true
};
/* ========================== *
* Thread local state
* ========================== */
/* Per-thread state: an arena plus free lists for recycling scopes and
 * scope references without cross-thread synchronization. */
struct sheet_tctx {
    struct arena arena;
    struct sheet_scope *first_free_scope;
    struct sheet_scope_reference *first_free_reference;
};
INTERNAL THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(sheet_tctx_alloc, vtctx)
{
struct sheet_tctx *tctx = (struct sheet_tctx *)vtctx;
tctx->arena = arena_alloc(MEGABYTE(64));
}
/* Thread-local context destructor: releases this thread's sheet arena. */
INTERNAL THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(sheet_tctx_release, vtctx)
{
    arena_release(&((struct sheet_tctx *)vtctx)->arena);
}
/* One sheet_tctx per thread, constructed/released by the hooks above. */
GLOBAL THREAD_LOCAL_VAR_DEF(tl_sheet_tctx, struct sheet_tctx, sheet_tctx_alloc, sheet_tctx_release);
/* ========================== *
* Startup
* ========================== */
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sheet_shutdown);
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_loader_thread_entry_point, arg);
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg);
struct sheet_startup_receipt sheet_startup(struct resource_startup_receipt *resource_sr)
{
(UNUSED)resource_sr;
G.cache.node_pool_mutex = sys_mutex_alloc();
G.cache.arena = arena_alloc(GIGABYTE(64));
G.cache.buckets = arena_push_array_zero(&G.cache.arena, struct cache_bucket, CACHE_BUCKETS_COUNT);
for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
G.cache.buckets[i].rw_mutex = sys_rw_mutex_alloc();
}
G.loader_cmd_arena = arena_alloc(GIGABYTE(64));
G.loaders_mutex = sys_mutex_alloc();
G.loaders_cv = sys_condition_variable_alloc();
G.evictor_mutex = sys_mutex_alloc();
G.evictor_cv = sys_condition_variable_alloc();
{
struct temp_arena scratch = scratch_begin_no_conflict();
G.loader_threads_count = clamp_i64(1, MAX_LOADER_THREADS, sys_num_logical_processors() - 1);
for (u64 i = 0; i < G.loader_threads_count; ++i) {
struct string thread_name = string_format(scratch.arena,
STR("[P0] Sheet loader %F"),
FMT_UINT(i));
G.loader_threads[i] = sys_thread_alloc(sheet_loader_thread_entry_point, NULL, thread_name);
}
scratch_end(scratch);
}
G.evictor_thread = sys_thread_alloc(sheet_evictor_thread_entry_point, NULL, STR("[P0] Sheet evictor"));
app_register_exit_callback(&sheet_shutdown);
return (struct sheet_startup_receipt) { 0 };
}
/* App-exit callback: raises both shutdown flags under their locks, wakes all
 * workers, then joins the loader threads and the evictor thread. */
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sheet_shutdown)
{
    __prof;
    /* Tell loader threads to stop */
    sys_mutex_lock(&G.loaders_mutex);
    G.loaders_shutdown = true;
    sys_condition_variable_broadcast(&G.loaders_cv);
    sys_mutex_unlock(&G.loaders_mutex);
    /* Tell the evictor thread to stop */
    sys_mutex_lock(&G.evictor_mutex);
    G.evictor_shutdown = true;
    sys_condition_variable_broadcast(&G.evictor_cv);
    sys_mutex_unlock(&G.evictor_mutex);
    /* Join everything */
    u64 thread_index = 0;
    while (thread_index < G.loader_threads_count) {
        sys_thread_wait_release(&G.loader_threads[thread_index]);
        ++thread_index;
    }
    sys_thread_wait_release(&G.evictor_thread);
}
/* ========================== *
* Tag
* ========================== */
/* Builds a sheet tag from a resource path: the FNV-128 hash of the path bytes
 * plus the path itself (not copied; the caller retains ownership). */
struct sheet_tag sheet_tag_from_path(struct string path)
{
    struct sheet_tag res = { 0 };
    res.hash = hash_fnv128(HASH_FNV128_BASIS, BUFFER_FROM_STRING(path));
    res.path = path;
    return res;
}
/* ========================== *
* Refcount
* ========================== */
/* Atomically adds `amount` to a node's refcount and stamps the current
 * evictor cycle into last_modified_cycle. Both fields are packed into one
 * 64-bit atomic (struct cache_node_refcount) and updated with a standard
 * compare-exchange retry loop. */
INTERNAL void node_refcount_add(struct cache_node *n, i32 amount)
{
    u32 cycle = atomic_u32_eval(&G.evictor_cycle);
    struct atomic_u64 *rc_atomic = &n->refcount_struct;
    u64 expected = atomic_u64_eval(rc_atomic);
    for (;;) {
        struct cache_node_refcount updated = *(struct cache_node_refcount *)&expected;
        updated.count += amount;
        updated.last_modified_cycle = cycle;
        u64 observed = atomic_u64_eval_compare_exchange(rc_atomic, expected, *(u64 *)&updated);
        if (observed == expected) {
            break;
        }
        expected = observed;
    }
}
/* ========================== *
* Load
* ========================== */
/* Converts a decoded .ase result into a runtime sheet, copying frame and span
 * data into `arena`. Requires at least one decoded frame. */
INTERNAL struct sheet init_sheet_from_ase_result(struct arena *arena, struct ase_decode_sheet_result ase)
{
    ASSERT(ase.num_frames >= 1);
    struct sheet sheet = { 0 };
    sheet.image_size = ase.image_size;
    sheet.frame_size = ase.frame_size;
    sheet.frames_count = ase.num_frames;
    sheet.frames = arena_push_array_zero(arena, struct sheet_frame, ase.num_frames);
    /* Frames arrive as a linked list; scatter them into the array by index */
    for (struct ase_frame *f = ase.frame_head; f; f = f->next) {
        sheet.frames[f->index] = (struct sheet_frame) {
            .index = f->index,
            .duration = f->duration,
            .clip = f->clip
        };
    }
    /* Named spans go into a fixed lookup dict, over-provisioned by the bucket ratio */
    sheet.spans_count = ase.num_spans;
    if (ase.num_spans > 0) {
        sheet.spans_dict = fixed_dict_init(arena, (u64)(ase.num_spans * SHEET_LOOKUP_TABLE_BUCKET_RATIO));
        for (struct ase_span *s = ase.span_head; s; s = s->next) {
            struct string name = string_copy(arena, s->name);
            struct sheet_span *span = arena_push(arena, struct sheet_span);
            *span = (struct sheet_span) {
                .name = name,
                .start = s->start,
                .end = s->end
            };
            fixed_dict_set(arena, &sheet.spans_dict, name, span);
        }
    }
    return sheet;
}
INTERNAL void sheet_load(struct cache_node *n, struct sheet_tag tag)
{
__prof;
struct temp_arena scratch = scratch_begin_no_conflict();
atomic_u32_eval_exchange(&n->state, CACHE_NODE_STATE_WORKING);
struct string path = tag.path;
logf_info("Loading sheet \"%F\"", FMT_STR(path));
sys_timestamp_t start_ts = sys_timestamp();
ASSERT(string_ends_with(path, STR(".ase")));
n->arena = arena_alloc(SHEET_ARENA_RESERVE);
{
/* Decode */
struct ase_decode_sheet_result decoded = { 0 };
if (resource_exists(path)) {
struct resource sheet_rs = resource_open(path);
decoded = ase_decode_sheet(scratch.arena, sheet_rs.bytes);
#if RESOURCE_RELOADING
n->initial_resource_file_modified_time = sys_file_get_time(sheet_rs.file).modified;
#endif
resource_close(sheet_rs);
/* Initialize */
n->sheet = arena_push(&n->arena, struct sheet);
*n->sheet = init_sheet_from_ase_result(&n->arena, decoded);
} else {
n->sheet = &g_sheet_nil;
logf_error("Resource \"%F\" not found", path);
}
}
#if RESOURCE_RELOADING
u64 cpy_len = min_u64(tag.path.len, ARRAY_COUNT(n->tag_path));
n->tag_path_len = cpy_len;
MEMCPY(n->tag_path, tag.path.text, cpy_len);
#endif
arena_set_readonly(&n->arena);
n->memory_usage = n->arena.committed;
atomic_u64_eval_add(&G.cache.memory_usage, n->memory_usage);
logf_info("Finished loading sheet \"%F\" in %F seconds (cache size: %F bytes).",
FMT_STR(path),
FMT_FLOAT(sys_timestamp_seconds(sys_timestamp() - start_ts)),
FMT_UINT(n->memory_usage));
atomic_u32_eval_exchange(&n->state, CACHE_NODE_STATE_LOADED);
scratch_end(scratch);
}
/* ========================== *
* Scope
* ========================== */
/* Ensures `scope` holds exactly one reference to `cache_node`.
 * References are bucketed inside the scope by the node's cache bucket index.
 * A node already referenced is left alone; otherwise the node's refcount is
 * bumped and a (recycled or freshly pushed) reference record is appended. */
INTERNAL void scope_ensure_reference(struct sheet_scope *scope, struct cache_node *cache_node, u64 cache_bucket_index)
{
    __prof;
    /* Scan the scope's bucket chain for an existing reference */
    struct sheet_scope_reference **slot = &scope->reference_buckets[cache_bucket_index];
    struct sheet_scope_reference *found = NULL;
    for (struct sheet_scope_reference *it = *slot; it; it = *slot) {
        if (it->cache_node == cache_node) {
            found = it;
            break;
        }
        slot = &it->next_hash;
    }
    if (found) {
        return; /* Scope already references node */
    }
    /* Increment refcount */
    node_refcount_add(cache_node, 1);
    /* Build a reference record, recycling from the thread-local free list */
    struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx);
    struct sheet_scope_reference *ref = tctx->first_free_reference;
    if (ref) {
        tctx->first_free_reference = ref->next_free;
        MEMZERO_STRUCT(ref);
    } else {
        ref = arena_push_zero(&tctx->arena, struct sheet_scope_reference);
    }
    ref->cache_node = cache_node;
    *slot = ref; /* Append at the end of the chain */
}
/* Opens a sheet scope on the calling thread. Scopes track which cache nodes
 * the thread references; a recycled scope keeps its bucket array allocation
 * but has it (and the rest of the scope) cleared. */
struct sheet_scope *sheet_scope_begin(void)
{
    struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx);
    struct sheet_scope *scope = tctx->first_free_scope;
    if (scope) {
        /* Recycle: reuse the bucket array, wipe everything else */
        tctx->first_free_scope = scope->next_free;
        struct sheet_scope_reference **buckets = scope->reference_buckets;
        MEMZERO(buckets, sizeof(*buckets) * CACHE_BUCKETS_COUNT);
        *scope = (struct sheet_scope) {
            .reference_buckets = buckets
        };
    } else {
        scope = arena_push_zero(&tctx->arena, struct sheet_scope);
        scope->reference_buckets = arena_push_array_zero(&tctx->arena, struct sheet_scope_reference *, CACHE_BUCKETS_COUNT);
    }
    return scope;
}
/* Closes a scope: drops every node reference it holds, recycles the reference
 * records onto the thread-local free list, then recycles the scope itself. */
void sheet_scope_end(struct sheet_scope *scope)
{
    struct sheet_tctx *tctx = thread_local_var_eval(&tl_sheet_tctx);
    for (u64 bucket_index = 0; bucket_index < CACHE_BUCKETS_COUNT; ++bucket_index) {
        struct sheet_scope_reference *ref = scope->reference_buckets[bucket_index];
        while (ref) {
            struct sheet_scope_reference *next = ref->next_hash;
            /* Drop this scope's hold on the node */
            node_refcount_add(ref->cache_node, -1);
            /* Recycle the reference record */
            ref->next_free = tctx->first_free_reference;
            tctx->first_free_reference = ref;
            ref = next;
        }
    }
    /* Recycle the scope */
    scope->next_free = tctx->first_free_scope;
    tctx->first_free_scope = scope;
}
/* ========================== *
* Cache interface
* ========================== */
/* Finds (or inserts) the cache node for `tag` and attaches it to `scope`.
 * Fast path takes the bucket's rw-lock shared; on a miss the lock is retaken
 * exclusively and the bucket is RE-SCANNED before inserting.
 * FIX: the original reused the tail pointer captured under the shared lock
 * after dropping it — another thread could insert the same tag (or grow the
 * chain) between the shared and exclusive acquisitions, producing duplicate
 * nodes and corrupting the bucket list. Re-scanning under the exclusive lock
 * makes the insert race-free. */
INTERNAL struct cache_node *node_lookup_touch(struct sheet_scope *scope, struct sheet_tag tag)
{
    __prof;
    struct cache_node *n = NULL;
    u64 cache_bucket_index = tag.hash % CACHE_BUCKETS_COUNT;
    struct cache_bucket *bucket = &G.cache.buckets[cache_bucket_index];
    /* Lookup (shared) */
    /* TODO: Spinlock */
    sys_rw_mutex_lock_shared(&bucket->rw_mutex);
    {
        for (n = bucket->first; n; n = n->next_hash) {
            if (n->hash == tag.hash) {
                scope_ensure_reference(scope, n, cache_bucket_index);
                break;
            }
        }
    }
    sys_rw_mutex_unlock_shared(&bucket->rw_mutex);
    /* Miss: retake exclusively, re-scan, and insert only if still absent */
    if (!n) {
        __profscope(node_lookup_allocate);
        sys_rw_mutex_lock_exclusive(&bucket->rw_mutex);
        {
            /* Re-scan: another thread may have inserted this tag in between */
            struct cache_node *tail = NULL;
            for (n = bucket->first; n; n = n->next_hash) {
                if (n->hash == tag.hash) {
                    break;
                }
                tail = n;
            }
            if (!n) {
                /* Alloc node (recycled from the pool, or pushed fresh) */
                sys_mutex_lock(&G.cache.node_pool_mutex);
                {
                    if (G.cache.node_pool_first_free) {
                        n = G.cache.node_pool_first_free;
                        G.cache.node_pool_first_free = n->next_free;
                        MEMZERO_STRUCT(n);
                    } else {
                        n = arena_push_zero(&G.cache.arena, struct cache_node);
                    }
                }
                sys_mutex_unlock(&G.cache.node_pool_mutex);
                /* Init node and append to the bucket list */
                n->hash = tag.hash;
                if (tail) {
                    tail->next_hash = n;
                    n->prev_hash = tail;
                } else {
                    bucket->first = n;
                }
            }
            scope_ensure_reference(scope, n, cache_bucket_index);
        }
        sys_rw_mutex_unlock_exclusive(&bucket->rw_mutex);
    }
    return n;
}
/* Resolves a tag to a sheet through the cache.
 * Returns the loaded sheet when available, otherwise g_sheet_loading.
 * The first caller to observe a fresh node (state NONE) claims the load via
 * compare-exchange: with `await` set it loads synchronously on this thread,
 * otherwise it enqueues a loader cmd for the worker threads.
 * NOTE(review): `await` only forces a synchronous load for the claiming
 * caller — if the node is already QUEUED/WORKING this returns the loading
 * sentinel without blocking. Confirm that is the intended contract. */
INTERNAL struct sheet *sheet_from_tag_internal(struct sheet_scope *scope, struct sheet_tag tag, b32 await)
{
    struct sheet *res = &g_sheet_loading;
    struct cache_node *n = node_lookup_touch(scope, tag);
    u32 state = atomic_u32_eval(&n->state);
    if (state == CACHE_NODE_STATE_LOADED) {
        res = n->sheet;
    } else if (state == CACHE_NODE_STATE_NONE) {
        /* Only one thread wins the NONE -> QUEUED transition */
        if (atomic_u32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUED) == CACHE_NODE_STATE_NONE) {
            /* Node is new, load sheet */
            if (await) {
                sheet_load(n, tag);
                res = n->sheet;
            } else {
                sys_mutex_lock(&G.loaders_mutex);
                {
                    /* Allocate cmd (recycle from the free list when possible) */
                    struct loader_cmd *cmd = NULL;
                    if (G.first_free_loader_cmd) {
                        cmd = G.first_free_loader_cmd;
                        G.first_free_loader_cmd = cmd->next_free;
                        MEMZERO_STRUCT(cmd);
                    } else {
                        cmd = arena_push_zero(&G.loader_cmd_arena, struct loader_cmd);
                    }
                    /* Initialize cmd; copy the path bytes into the cmd's own
                     * buffer since the caller's string may not outlive it.
                     * NOTE(review): copy_len may truncate, but cmd->tag.path.len
                     * keeps the original length — confirm paths never exceed
                     * the 4096-byte buffer. */
                    cmd->cache_node = n;
                    cmd->tag = tag;
                    {
                        u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
                        cmd->tag.path.text = cmd->tag_path_buff;
                        MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
                    }
                    /* Add cmd to queue */
                    *(G.last_loader_cmd ? &G.last_loader_cmd->next : &G.first_loader_cmd) = cmd;
                    G.last_loader_cmd = cmd;
                    /* Cmd holds reference to node (dropped by the loader thread) */
                    node_refcount_add(n, 1);
                    /* Signal work ready */
                    sys_condition_variable_signal(&G.loaders_cv);
                }
                sys_mutex_unlock(&G.loaders_mutex);
            }
        }
    }
    return res;
}
/* Resolves `tag`; if the node is fresh, the load runs on this thread before
 * returning. May still return g_sheet_loading if another thread is mid-load. */
struct sheet *sheet_from_tag_await(struct sheet_scope *scope, struct sheet_tag tag)
{
    __prof;
    return sheet_from_tag_internal(scope, tag, true);
}
/* Resolves `tag` without blocking: a fresh node is queued to the loader
 * threads and g_sheet_loading is returned until the load completes. */
struct sheet *sheet_from_tag_async(struct sheet_scope *scope, struct sheet_tag tag)
{
    __prof;
    return sheet_from_tag_internal(scope, tag, false);
}
/* ========================== *
* Sheet data
* ========================== */
struct sheet_span sheet_get_span(struct sheet *sheet, struct string name)
{
struct sheet_span res = { 0 };
if (sheet->spans_count > 0) {
struct sheet_span *entry = fixed_dict_get(&sheet->spans_dict, name);
if (entry) {
res = *entry;
}
}
return res;
}
/* Returns the frame at `index`, clamped to the last frame. A sheet with no
 * frames yields a fallback frame (full clip, 0.1s duration). */
struct sheet_frame sheet_get_frame(struct sheet *sheet, u32 index)
{
    if (sheet->frames_count == 0) {
        return (struct sheet_frame) {
            .index = 0,
            .duration = 0.1,
            .clip = CLIP_ALL
        };
    }
    u32 clamped = min_u32(sheet->frames_count - 1, index);
    return sheet->frames[clamped];
}
/* ========================== *
* Loader thread
* ========================== */
/* Loader worker thread: pops cmds off the shared queue and loads each sheet.
 * Sleeps on the cv while the queue is empty; exits when loaders_shutdown is
 * raised (any still-pending cmds are abandoned at that point). */
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_loader_thread_entry_point, arg)
{
    __prof;
    (UNUSED)arg;
    b32 abort = false;
    while (!abort) {
        sys_mutex_lock(&G.loaders_mutex);
        if (G.loaders_shutdown) {
            /* Exit thread */
            abort = true;
        } else if (!G.first_loader_cmd) {
            /* Wait for work (the outer loop re-checks, so spurious wakes are fine) */
            sys_condition_variable_wait(&G.loaders_cv, &G.loaders_mutex);
        }
        while (G.first_loader_cmd && !G.loaders_shutdown) {
            /* Pull cmd from queue */
            struct loader_cmd *cmd = G.first_loader_cmd;
            G.first_loader_cmd = cmd->next;
            if (G.last_loader_cmd == cmd) {
                G.last_loader_cmd = NULL;
            }
            /* Do work (temporarily unlock so other loaders can pull cmds) */
            sys_mutex_unlock(&G.loaders_mutex);
            {
                sheet_load(cmd->cache_node, cmd->tag);
            }
            sys_mutex_lock(&G.loaders_mutex);
            /* Cmd no longer references node. FIX: drop the reference BEFORE
             * recycling the cmd — the original read cmd->cache_node after
             * pushing the cmd onto the free list, which was only safe because
             * the mutex happens to span both; this ordering doesn't rely on
             * that coincidence. */
            node_refcount_add(cmd->cache_node, -1);
            /* Free cmd */
            cmd->next_free = G.first_free_loader_cmd;
            G.first_free_loader_cmd = cmd;
        }
        sys_mutex_unlock(&G.loaders_mutex);
    }
}
/* ========================== *
* Evictor thread
* ========================== */
/* Scratch-allocated record for one eviction candidate, built during the
 * evictor's scan phase. Participates in three singly linked lists:
 * scan order, LRU-sorted order, and the final evicted list. */
struct evict_node {
    b32 force_evict; /* Evict regardless of budget (resource file changed on disk) */
    struct cache_node_refcount refcount; /* Snapshot taken during the scan */
    struct cache_node *cache_node;
    struct cache_bucket *cache_bucket;
    struct evict_node *next_consider;     /* All candidates, scan order */
    struct evict_node *next_consider_lru; /* Candidates sorted least-recently-used first */
    struct evict_node *next_evicted;      /* Candidates actually removed from the cache */
};
/* Evictor thread: wakes every EVICTOR_CYCLE_INTERVAL and, when the cache is
 * over budget (or RESOURCE_RELOADING is enabled), evicts cache nodes in four
 * phases: (1) scan all buckets for unreferenced candidates, (2) sort them
 * least-recently-used first, (3) unlink them from their buckets until under
 * budget, (4) release their memory and recycle the nodes into the pool. */
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sheet_evictor_thread_entry_point, arg)
{
    (UNUSED)arg;
    b32 abort = false;
    while (!abort) {
        struct temp_arena scratch = scratch_begin_no_conflict();
        struct evict_node *head_consider = NULL;     /* Phase 1: scan candidates */
        struct evict_node *head_consider_lru = NULL; /* Phase 2: LRU-sorted candidates */
        struct evict_node *head_evicted = NULL;      /* Phase 3: removed from the table */
        sys_mutex_lock(&G.evictor_mutex);
        {
            /* Thread shutdown */
            if (G.evictor_shutdown) {
                abort = true;
            } else {
                /* Wait; the cv doubles as cycle timer and shutdown signal */
                sys_condition_variable_wait_time(&G.evictor_cv, &G.evictor_mutex, EVICTOR_CYCLE_INTERVAL);
            }
            if (!G.evictor_shutdown) {
                /* Raw read: this thread is the only writer of evictor_cycle (see the
                 * atomic_u32_inc_eval at the bottom of this loop) */
                u32 cur_cycle = *atomic_u32_raw(&G.evictor_cycle);
                /* Scan for evictable nodes */
                b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > SHEET_CACHE_MEMORY_BUDGET;
                if (cache_over_budget || RESOURCE_RELOADING) {
                    __profscope(eviction_scan);
                    for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
                        struct cache_bucket *bucket = &G.cache.buckets[i];
                        sys_rw_mutex_lock_shared(&bucket->rw_mutex);
                        {
                            struct cache_node *n = bucket->first;
                            while (n) {
                                b32 consider_for_eviction = false;
                                b32 force_evict = false;
                                /* Snapshot refcount+cycle together from the packed atomic */
                                u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct);
                                struct cache_node_refcount refcount = *(struct cache_node_refcount *)&refcount_uncast;
                                /* Only unreferenced nodes are candidates */
                                if (refcount.count <= 0) {
#if RESOURCE_RELOADING
                                    /* Check if file changed for resource reloading.
                                     * NOTE(review): the file is opened unconditionally here;
                                     * confirm sys_file_open_read/sys_file_get_time behave
                                     * sanely when the file is missing (sheet_load copies
                                     * tag_path even for missing resources). */
                                    if (!consider_for_eviction) {
                                        struct string path = string_from_cstr_len((char *)n->tag_path, n->tag_path_len);
                                        b32 file_changed = false;
                                        struct sys_datetime current_file_time;
                                        {
                                            struct sys_file file = sys_file_open_read(path);
                                            current_file_time = sys_file_get_time(file).modified;
                                            sys_file_close(file);
                                        }
                                        file_changed = MEMCMP_STRUCT(&n->initial_resource_file_modified_time, &current_file_time) != 0;
                                        if (file_changed) {
                                            logf_info("Resource file for sheet \"%F\" has changed. Evicting to allow for reloading.", FMT_STR(path));
                                            consider_for_eviction = true;
                                            force_evict = true;
                                        }
                                    }
#endif
                                    /* Check usage time */
#if RESOURCE_RELOADING
                                    /* Only check conditional if RESOURCE_RELOADING is enabled,
                                     * since over-budget is assumed to be true otherwise */
                                    if (cache_over_budget)
#endif
                                    {
                                        u32 last_used_cycle = refcount.last_modified_cycle;
                                        /* Unsigned subtraction: correct even across cycle wraparound */
                                        f64 time_since_use = (f64)(cur_cycle - last_used_cycle) * EVICTOR_CYCLE_INTERVAL;
                                        if (time_since_use > EVICTOR_GRACE_PERIOD) {
                                            /* Cache is over budget and node hasn't been referenced in a while */
                                            consider_for_eviction = true;
                                        }
                                    }
                                }
                                /* Add node to evict list (scratch-allocated record) */
                                if (consider_for_eviction) {
                                    struct evict_node *evict_node = arena_push_zero(scratch.arena, struct evict_node);
                                    evict_node->cache_node = n;
                                    evict_node->cache_bucket = bucket;
                                    evict_node->refcount = refcount;
                                    evict_node->force_evict = force_evict;
                                    evict_node->next_consider = head_consider;
                                    head_consider = evict_node;
                                }
                                n = n->next_hash;
                            }
                            sys_rw_mutex_unlock_shared(&bucket->rw_mutex);
                        }
                    }
                }
                /* Sort evict nodes by usage time: linked-list insertion sort into
                 * head_consider_lru, oldest cycle first; force_evict entries are
                 * inserted at the front so they are always processed */
                if (head_consider) {
                    /* TODO: Optimize sort if necessary. Currently O(n^2). */
                    __profscope(eviction_sort);
                    for (struct evict_node *en = head_consider; en; en = en->next_consider) {
                        u32 last_modified_cycle = en->refcount.last_modified_cycle;
                        struct evict_node *prev = NULL;
                        struct evict_node *next = head_consider_lru;
                        while (next && !(last_modified_cycle <= next->refcount.last_modified_cycle || en->force_evict)) {
                            prev = next;
                            next = next->next_consider_lru;
                        }
                        if (prev) {
                            prev->next_consider_lru = en;
                        } else {
                            head_consider_lru = en;
                        }
                        en->next_consider_lru = next;
                    }
                }
                /* Remove evictable nodes from cache table until under budget */
                if (head_consider_lru) {
                    __profscope(eviction_cache_removal);
                    b32 stop_evicting = false;
                    for (struct evict_node *en = head_consider_lru; en && !stop_evicting; en = en->next_consider_lru) {
                        struct cache_bucket *bucket = en->cache_bucket;
                        struct cache_node *n = en->cache_node;
                        sys_rw_mutex_lock_exclusive(&bucket->rw_mutex);
                        {
                            /* Re-check under the exclusive lock: the node may have been
                             * referenced (or touched) since the scan snapshot was taken */
                            struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct);
                            if (refcount.count > 0 || ((refcount.last_modified_cycle != en->refcount.last_modified_cycle) && !en->force_evict)) {
                                /* Cache node has been referenced since scan, skip eviction. */
                            } else if (en->force_evict || atomic_u64_eval(&G.cache.memory_usage) > SHEET_CACHE_MEMORY_BUDGET) {
                                /* Remove from cache bucket */
                                if (n->prev_hash) {
                                    n->prev_hash->next_hash = n->next_hash;
                                } else {
                                    bucket->first = n->next_hash;
                                }
                                if (n->next_hash) {
                                    n->next_hash->prev_hash = n->prev_hash;
                                }
                                /* Negated u64 add relies on two's-complement wraparound */
                                atomic_u64_eval_add(&G.cache.memory_usage, -((i64)n->memory_usage));
                                /* Add to evicted list */
                                en->next_evicted = head_evicted;
                                head_evicted = en;
                            } else {
                                /* Cache is no longer over budget or force evicting, stop iteration */
                                stop_evicting = true;
                            }
                        }
                        sys_rw_mutex_unlock_exclusive(&bucket->rw_mutex);
                    }
                }
                if (head_evicted) {
                    /* Release evicted node memory (nodes are already unlinked, so no
                     * lock is needed here) */
                    {
                        __profscope(eviction_memory_release);
                        for (struct evict_node *en = head_evicted; en; en = en->next_evicted) {
                            struct cache_node *n = en->cache_node;
                            arena_release(&n->arena);
                        }
                    }
                    /* Add evicted nodes to free list */
                    sys_mutex_lock(&G.cache.node_pool_mutex);
                    {
                        __profscope(eviction_free_list_append);
                        for (struct evict_node *en = head_evicted; en; en = en->next_evicted) {
                            struct cache_node *n = en->cache_node;
                            n->next_free = G.cache.node_pool_first_free;
                            G.cache.node_pool_first_free = n;
                        }
                    }
                    sys_mutex_unlock(&G.cache.node_pool_mutex);
                }
            }
            atomic_u32_inc_eval(&G.evictor_cycle);
        }
        sys_mutex_unlock(&G.evictor_mutex);
        scratch_end(scratch);
    }
}