/* sprite.c — sprite texture & sheet cache (extraction header: 1226 lines, 46 KiB, C) */
#include "sprite.h"
|
|
#include "arena.h"
|
|
#include "log.h"
|
|
#include "sys.h"
|
|
#include "scratch.h"
|
|
#include "resource.h"
|
|
#include "ase.h"
|
|
#include "util.h"
|
|
#include "work.h"
|
|
#include "atomic.h"
|
|
#include "thread_local.h"
|
|
#include "app.h"
|
|
#include "renderer.h"
|
|
#include "math.h"
|
|
|
|
#define CACHE_MEMORY_BUDGET (MEGABYTE(256))
|
|
#define CACHE_BUCKETS_COUNT 1024
|
|
|
|
#define MAX_LOADER_THREADS 4
|
|
|
|
/* How long between evictor thread scans */
|
|
#define EVICTOR_CYCLE_INTERVAL (RESOURCE_RELOADING ? 0.100 : 0.500)
|
|
|
|
/* Time a cache entry spends unused until it's considered evictable (rounded up to a multiple of EVICTOR_CYCLE_INTERVAL) */
|
|
#define EVICTOR_GRACE_PERIOD 10.000
|
|
|
|
#define TCTX_ARENA_RESERVE MEGABYTE(64)
|
|
|
|
/* Texture arena only used to store texture struct at the moment. Actual image data is allocated on GPU. */
|
|
#define TEXTURE_ARENA_RESERVE MEGABYTE(1)
|
|
|
|
#define SHEET_ARENA_RESERVE MEGABYTE(64)
|
|
#define SHEET_SPAN_LOOKUP_TABLE_BUCKET_RATIO 2.0
|
|
#define SHEET_SLICE_LOOKUP_TABLE_BUCKET_RATIO 2.0
|
|
|
|
/* ========================== *
|
|
* Loader cmd structs
|
|
* ========================== */
|
|
|
|
/* A queued request to load one cache node on a worker thread. Commands are
 * pooled through `next_free` under G.load_cmds_mutex. The tag's path bytes
 * are copied into `tag_path_buff` because the caller's string storage may
 * not outlive the asynchronous load. */
struct load_cmd {
    struct load_cmd *next_free;
    struct cache_node *cache_node;
    struct sprite_tag tag; /* tag.path.text is rebased onto tag_path_buff when queued */
    u8 tag_path_buff[512];
};
|
|
|
|
/* ========================== *
|
|
* Cache structs
|
|
* ========================== */
|
|
|
|
/* Payload kind stored in a cache node. */
enum cache_node_kind {
    CACHE_NODE_KIND_TEXTURE,
    CACHE_NODE_KIND_SHEET
};

/* Load lifecycle of a cache node; held in an atomic u32 and advanced
 * NONE -> QUEUED -> WORKING -> LOADED (see data_from_tag_internal and the
 * cache_node_load_* functions). */
enum cache_node_state {
    CACHE_NODE_STATE_NONE,
    CACHE_NODE_STATE_QUEUED,
    CACHE_NODE_STATE_WORKING,
    CACHE_NODE_STATE_LOADED
};

/* Refcount plus last-touch cycle, packed together so both fields update in
 * a single 64-bit compare-exchange (see node_refcount_add). */
struct cache_node_refcount {
    i32 count; /* Number of scopes currently holding a reference to this node */
    u32 last_modified_cycle; /* Last time that refcount was modified */
};
CT_ASSERT(sizeof(struct cache_node_refcount) == 8); /* Must fit into 64 bit atomic */

/* 128-bit cache key: the sprite tag's path hash folded with the node kind
 * (see cache_node_hash_from_tag_hash). */
struct cache_node_hash {
    u128 v;
};
|
|
|
|
/* One cached sprite resource (texture or sheet). Nodes live in the cache
 * arena, are linked into one hash bucket, and are recycled through the
 * node pool free list. */
struct cache_node {
    enum cache_node_kind kind;
    struct cache_node_hash hash;
    struct atomic_u32 state; /* holds an enum cache_node_state value */
    struct atomic_u64 refcount_struct; /* Cast eval to `cache_node_refcount` */

    /* Allocated data */
    u64 memory_usage; /* node arena bytes plus estimated GPU bytes for textures */
    struct arena arena;
    struct sprite_texture *texture; /* used when kind == TEXTURE; G.nil_texture until a load publishes */
    struct sprite_sheet *sheet;     /* used when kind == SHEET; G.nil_sheet until a load publishes */

    /* Hash list */
    struct cache_node *next_hash;
    struct cache_node *prev_hash;

    /* Free list */
    struct cache_node *next_free;

#if RESOURCE_RELOADING
    /* Snapshot of the source file's mtime at load time plus a copy of the
     * path, used by the reload machinery to detect on-disk changes. */
    struct sys_datetime initial_resource_file_modified_time;
    u64 tag_path_len;
    u8 tag_path[4096];
#endif
};
|
|
|
|
/* One hash bucket of the cache; `mutex` guards the `first` chain. */
struct cache_bucket {
    struct sys_mutex mutex;
    struct cache_node *first;
};

/* Global sprite cache: a fixed bucket table over a pooled node allocator.
 * memory_usage totals bytes across loaded nodes (presumably compared by
 * the evictor against CACHE_MEMORY_BUDGET — evictor body not in view,
 * confirm). */
struct cache {
    struct atomic_u64 memory_usage;
    struct arena arena;
    struct cache_bucket *buckets; /* CACHE_BUCKETS_COUNT entries */
    struct sys_mutex node_pool_mutex; /* guards node_pool_first_free */
    struct cache_node *node_pool_first_free;
};

/* A scope's record of one cache node it pinned; chained per scope bucket
 * and recycled through the thread-local free list. */
struct sprite_scope_reference {
    struct cache_node *cache_node;
    struct sprite_scope_reference *next_hash;
    struct sprite_scope_reference *next_free;
};
|
|
|
|
/* ========================== *
|
|
* Global state
|
|
* ========================== */
|
|
|
|
/* Process-wide sprite system state, initialized by sprite_startup. */
GLOBAL struct {
    struct arena perm_arena; /* read-only after startup */
    struct sprite_texture *nil_texture;     /* "missing sprite" checkerboard; loaded = true */
    struct sprite_texture *loading_texture; /* placeholder returned while an async load runs */
    struct sprite_sheet *nil_sheet;
    struct sprite_sheet *loading_sheet;

    /* Cache */
    struct cache cache;

    /* Load cmds */
    struct sys_mutex load_cmds_mutex; /* guards the arena and free list below */
    struct arena load_cmds_arena;
    struct load_cmd *first_free_load_cmd;

    /* Evictor thread */
    struct atomic_u32 evictor_cycle; /* timestamps refcount updates; presumably advanced per evictor scan — entry point not in view */
    b32 evictor_shutdown; /* written under evictor_mutex (see sprite_shutdown) */
    struct sys_mutex evictor_mutex;
    struct sys_condition_variable evictor_cv;

    struct sys_thread evictor_thread;
} G = ZI, DEBUG_ALIAS(G, G_sprite);
|
|
|
|
/* ========================== *
|
|
* Thread local state
|
|
* ========================== */
|
|
|
|
/* Per-thread sprite state: a backing arena plus free lists for recycled
 * scopes and scope references (see sprite_scope_begin/end). */
struct sprite_tctx {
    struct arena arena;
    struct sprite_scope *first_free_scope;
    struct sprite_scope_reference *first_free_reference;
};
|
|
|
|
INTERNAL THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(sprite_tctx_alloc, vtctx)
|
|
{
|
|
struct sprite_tctx *tctx = (struct sprite_tctx *)vtctx;
|
|
tctx->arena = arena_alloc(MEGABYTE(64));
|
|
}
|
|
|
|
INTERNAL THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(sprite_tctx_release, vtctx)
|
|
{
|
|
struct sprite_tctx *tctx = (struct sprite_tctx *)vtctx;
|
|
arena_release(&tctx->arena);
|
|
}
|
|
|
|
GLOBAL THREAD_LOCAL_VAR_DEF(tl_sprite_tctx, struct sprite_tctx, sprite_tctx_alloc, sprite_tctx_release);
|
|
|
|
/* ========================== *
|
|
* Purple-black image
|
|
* ========================== */
|
|
|
|
INTERNAL struct image_rgba generate_purple_black_image(struct arena *arena, u32 width, u32 height)
{
    /* "Missing texture" fallback: a checkerboard of 4x4-pixel purple and
     * black cells, written row-major into an arena allocation. */
    u32 *pixels = arena_push_array(arena, u32, width * height);

    u32 cell_size = 4;
    u32 purple = 0xFFDC00FF;
    u32 black = 0xFF000000;
    for (u32 y = 0; y < height; ++y) {
        for (u32 x = 0; x < width; ++x) {
            /* Cells whose x/y cell indices share parity are purple, the
             * rest black — equivalent to the classic nested even/odd test. */
            b32 same_parity = (((x / cell_size) + (y / cell_size)) % 2) == 0;
            pixels[x + (width * y)] = same_parity ? purple : black;
        }
    }

    return (struct image_rgba) {
        .width = width,
        .height = height,
        .pixels = pixels
    };
}
|
|
|
|
/* ========================== *
|
|
* Startup
|
|
* ========================== */
|
|
|
|
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sprite_shutdown);
|
|
INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg);
|
|
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg);
|
|
|
|
/* One-time initialization of the sprite system: permanent fallback objects
 * (nil/loading texture and sheet), the shared node cache, the async
 * load-command pool, and the evictor thread. The receipt parameters exist
 * only to enforce startup ordering against the renderer and resource
 * systems. */
struct sprite_startup_receipt sprite_startup(struct renderer_startup_receipt *renderer_sr,
                                             struct resource_startup_receipt *resource_sr)
{
    (UNUSED)renderer_sr;
    (UNUSED)resource_sr;

    G.perm_arena = arena_alloc(MEGABYTE(1));
    {
        /* Init loading texture: zeroed, `loaded` stays false so callers can
         * tell a load is still in flight. */
        G.loading_texture = arena_push_zero(&G.perm_arena, struct sprite_texture);
        /* Init nil texture: marked loaded and backed by the purple/black
         * checkerboard so missing sprites are visually obvious. */
        G.nil_texture = arena_push_zero(&G.perm_arena, struct sprite_texture);
        G.nil_texture->loaded = true;
        {
            struct temp_arena scratch = scratch_begin_no_conflict();
            struct image_rgba purple_black_image = generate_purple_black_image(scratch.arena, 64, 64);
            G.nil_texture->renderer_handle = renderer_texture_alloc(purple_black_image);
            scratch_end(scratch);
        }

        /* Init loading sheet: one-unit frame geometry, not marked loaded. */
        G.loading_sheet = arena_push_zero(&G.perm_arena, struct sprite_sheet);
        G.loading_sheet->image_size = V2(PIXELS_PER_UNIT, PIXELS_PER_UNIT);
        G.loading_sheet->frame_size = V2(PIXELS_PER_UNIT, PIXELS_PER_UNIT);

        /* Init nil sheet: same geometry, but marked loaded. */
        G.nil_sheet = arena_push_zero(&G.perm_arena, struct sprite_sheet);
        G.nil_sheet->image_size = V2(PIXELS_PER_UNIT, PIXELS_PER_UNIT);
        G.nil_sheet->frame_size = V2(PIXELS_PER_UNIT, PIXELS_PER_UNIT);
        G.nil_sheet->loaded = true;
    }
    /* The fallback objects never change again; trap accidental writes. */
    arena_set_readonly(&G.perm_arena);

    /* Cache: one mutex per bucket so lookups on different buckets do not
     * contend. */
    G.cache.node_pool_mutex = sys_mutex_alloc();
    G.cache.arena = arena_alloc(GIGABYTE(64));
    G.cache.buckets = arena_push_array_zero(&G.cache.arena, struct cache_bucket, CACHE_BUCKETS_COUNT);
    for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
        G.cache.buckets[i].mutex = sys_mutex_alloc();
    }

    G.load_cmds_arena = arena_alloc(GIGABYTE(64));
    G.load_cmds_mutex = sys_mutex_alloc();

    G.evictor_mutex = sys_mutex_alloc();
    G.evictor_cv = sys_condition_variable_alloc();

    G.evictor_thread = sys_thread_alloc(sprite_evictor_thread_entry_point, NULL, STR("[P0] Sprite evictor"));

    app_register_exit_callback(&sprite_shutdown);

    return (struct sprite_startup_receipt) { 0 };
}
|
|
|
|
/* App-exit callback: stops the evictor thread and waits for it to finish
 * before the process tears down the state it scans. */
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(sprite_shutdown)
{
    __prof;
    /* Signal evictor shutdown: the flag is written under the evictor mutex
     * so the evictor cannot miss the wakeup between checking the flag and
     * blocking on the condition variable. */
    {
        struct sys_lock lock = sys_mutex_lock_e(&G.evictor_mutex);
        G.evictor_shutdown = true;
        sys_condition_variable_broadcast(&G.evictor_cv);
        sys_mutex_unlock(&lock);
    }
    /* Block until the evictor thread has fully exited. */
    sys_thread_wait_release(&G.evictor_thread);
}
|
|
|
|
/* ========================== *
|
|
* Tag
|
|
* ========================== */
|
|
|
|
struct sprite_tag sprite_tag_from_path(struct string path)
|
|
{
|
|
struct sprite_tag res = ZI;
|
|
res.hash = HASH_FNV128_BASIS;
|
|
res.hash = hash_fnv128(res.hash, BUFFER_FROM_STRING(path));
|
|
res.path = path;
|
|
return res;
|
|
}
|
|
|
|
b32 sprite_tag_is_nil(struct sprite_tag tag)
|
|
{
|
|
return u128_eq(tag.hash, U128(0, 0));
|
|
}
|
|
|
|
b32 sprite_tag_eq(struct sprite_tag t1, struct sprite_tag t2)
{
    /* Tags compare by hash only; the path strings are not examined. */
    b32 hashes_match = u128_eq(t2.hash, t1.hash);
    return hashes_match;
}
|
|
|
|
/* Fold the node kind into the tag hash so the same resource path can be
 * cached both as a texture and as a sheet without colliding. */
INTERNAL struct cache_node_hash cache_node_hash_from_tag_hash(u128 tag_hash, enum cache_node_kind kind)
{
    /* Hash an explicit one-byte copy of the kind rather than the first
     * byte of the enum object itself: on a big-endian target the first
     * byte of the underlying int is 0 for every kind, which would make all
     * kinds hash identically. On little-endian targets this produces the
     * exact same hash as before. */
    u8 kind_byte = (u8)kind;
    return (struct cache_node_hash) { .v = hash_fnv128(tag_hash, BUFFER(1, &kind_byte)) };
}
|
|
|
|
/* ========================== *
|
|
* Refcount
|
|
* ========================== */
|
|
|
|
/* Lock-free refcount update: {count, last_modified_cycle} are packed into
 * one 64-bit word and updated with a CAS loop. last_modified_cycle is
 * stamped with the current evictor cycle so the evictor's grace-period
 * logic can see when a node was last touched. `amount` may be negative. */
INTERNAL void node_refcount_add(struct cache_node *n, i32 amount)
{
    u32 evictor_cycle = atomic_u32_eval(&G.evictor_cycle);
    struct atomic_u64 *refcount_atomic = &n->refcount_struct;
    u64 old_refcount_uncast = atomic_u64_eval(refcount_atomic);
    do {
        /* Convert between the u64 word and the struct with MEMCPY instead
         * of pointer casts: dereferencing a u64 through an incompatible
         * struct pointer (and vice versa) is undefined behavior under
         * strict aliasing. The copies are 8 bytes (CT_ASSERT above) and
         * compile to plain register moves. */
        struct cache_node_refcount new_refcount;
        MEMCPY(&new_refcount, &old_refcount_uncast, sizeof(new_refcount));
        new_refcount.count += amount;
        new_refcount.last_modified_cycle = evictor_cycle;

        u64 new_refcount_uncast = 0;
        MEMCPY(&new_refcount_uncast, &new_refcount, sizeof(new_refcount));

        u64 v = atomic_u64_eval_compare_exchange(refcount_atomic, old_refcount_uncast, new_refcount_uncast);
        if (v != old_refcount_uncast) {
            /* Lost the race: retry against the freshly observed value. */
            old_refcount_uncast = v;
        } else {
            break;
        }
    } while (true);
}
|
|
|
|
/* ========================== *
|
|
* Load
|
|
* ========================== */
|
|
|
|
/* Synchronously decode the .ase file named by `tag` into a GPU texture and
 * publish it on cache node `n`. Publishes state WORKING while decoding and
 * LOADED when done. If the resource does not exist, only an error is
 * logged and the node keeps the texture pointer set at node init
 * (G.nil_texture). Runs on whichever thread calls it — a worker via
 * sprite_load_task, or the caller of an *_await lookup. */
INTERNAL void cache_node_load_texture(struct cache_node *n, struct sprite_tag tag)
{
    __prof;
    struct temp_arena scratch = scratch_begin_no_conflict();

    atomic_u32_eval_exchange(&n->state, CACHE_NODE_STATE_WORKING);
    struct string path = tag.path;

    logf_info("Loading sprite texture \"%F\"", FMT_STR(path));
    sys_timestamp_t start_ts = sys_timestamp();

    ASSERT(string_ends_with(path, STR(".ase")));
    ASSERT(n->kind == CACHE_NODE_KIND_TEXTURE);

    /* TODO: Arena probably overkill. Just using it to store texture struct. */
    n->arena = arena_alloc(TEXTURE_ARENA_RESERVE);
    u64 memory_size = 0;
    {
        /* Decode */
        struct ase_decode_image_result decoded = ZI;
        if (resource_exists(path)) {
            struct resource texture_rs = resource_open(path);
            decoded = ase_decode_image(scratch.arena, texture_rs.bytes);
#if RESOURCE_RELOADING
            /* Remember the on-disk mtime so hot-reload can detect changes. */
            n->initial_resource_file_modified_time = sys_file_get_time(texture_rs.file).modified;
#endif
            resource_close(texture_rs);

            /* Initialize: pixel data goes straight to the GPU; only the
             * small descriptor struct lives in the node arena. */
            n->texture = arena_push(&n->arena, struct sprite_texture);
            n->texture->width = decoded.image.width;
            n->texture->height = decoded.image.height;
            n->texture->renderer_handle = renderer_texture_alloc(decoded.image);
            n->texture->valid = true;
            n->texture->loaded = true;
            /* TODO: Query renderer for more accurate texture size in VRAM */
            memory_size += (decoded.image.width * decoded.image.height) * sizeof(*decoded.image.pixels);
        } else {
            logf_error("Sprite \"%F\" not found", FMT_STR(path));
        }
#if RESOURCE_RELOADING
        /* Keep a (possibly truncated) copy of the path for the reload
         * scanner. */
        u64 cpy_len = min_u64(tag.path.len, ARRAY_COUNT(n->tag_path));
        n->tag_path_len = cpy_len;
        MEMCPY(n->tag_path, tag.path.text, cpy_len);
#endif
    }
    arena_set_readonly(&n->arena);
    n->memory_usage = n->arena.committed + memory_size;
    atomic_u64_eval_add(&G.cache.memory_usage, n->memory_usage);

    logf_info("Finished loading sprite texture \"%F\" in %F seconds (cache size: %F bytes).",
              FMT_STR(path),
              FMT_FLOAT(sys_timestamp_seconds(sys_timestamp() - start_ts)),
              FMT_UINT(n->memory_usage));

    /* Publish: readers of state LOADED may now dereference n->texture. */
    atomic_u32_eval_exchange(&n->state, CACHE_NODE_STATE_LOADED);

    scratch_end(scratch);
}
|
|
|
|
/* Build a sprite_sheet from decoded Aseprite data: copies frames, named
 * animation spans, and slice groups into `arena`; propagates each slice
 * key forward/backward across frames; and resolves "<name>.ray" slice
 * groups into per-slice direction vectors for the matching "<name>" group.
 * Everything in the returned sheet points into `arena`. */
INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, struct ase_decode_sheet_result ase)
{
    __prof;
    struct sprite_sheet sheet = ZI;

    ASSERT(ase.num_frames >= 1);

    struct v2 frame_size = ase.frame_size;
    struct v2 frame_center = v2_mul(ase.frame_size, 0.5f);

    /* Init frames: per-frame UV clip rects, normalized by the atlas image
     * size. */
    {
        __profscope(init_frames);
        sheet.image_size = ase.image_size;
        sheet.frame_size = ase.frame_size;
        sheet.frames = arena_push_array_zero(arena, struct sprite_sheet_frame, ase.num_frames);
        sheet.frames_count = ase.num_frames;
        for (struct ase_frame *ase_frame = ase.frame_head; ase_frame; ase_frame = ase_frame->next) {
            u32 index = ase_frame->index;

            struct v2 clip_p1 = { (f32)ase_frame->x1 / (f32)ase.image_size.x, (f32)ase_frame->y1 / (f32)ase.image_size.y };
            struct v2 clip_p2 = { (f32)ase_frame->x2 / (f32)ase.image_size.x, (f32)ase_frame->y2 / (f32)ase.image_size.y };

            sheet.frames[index] = (struct sprite_sheet_frame) {
                .index = index,
                .duration = ase_frame->duration,
                .clip = (struct clip_rect) { clip_p1, clip_p2 }
            };
        }
    }

    /* Init spans: named frame ranges, indexed by name in spans_dict. */
    sheet.spans_count = ase.num_spans;
    if (ase.num_spans > 0) {
        __profscope(init_spans);
        sheet.spans = arena_push_array_zero(arena, struct sprite_sheet_span, sheet.spans_count);
        sheet.spans_dict = fixed_dict_init(arena, (u64)(ase.num_spans * SHEET_SPAN_LOOKUP_TABLE_BUCKET_RATIO));
        u64 index = 0;
        for (struct ase_span *ase_span = ase.span_head; ase_span; ase_span = ase_span->next) {
            struct string name = string_copy(arena, ase_span->name);
            struct sprite_sheet_span *span = &sheet.spans[index];
            span->name = name;
            span->start = ase_span->start;
            span->end = ase_span->end;
            fixed_dict_set(arena, &sheet.spans_dict, name, span);
            ++index;
        }
    }

    /* Init slices */
    if (ase.num_slice_keys > 0) {
        __profscope(init_slices);
        struct temp_arena scratch = scratch_begin(arena);

        /* Scratch-side record of one slice key within a group. */
        struct temp_ase_slice_key_node {
            struct ase_slice_key *key;
            struct temp_ase_slice_key_node *next;

            u32 index_in_frame; /* this key's slot within a frame's slice run */
            u32 earliest_frame; /* first frame this key appears on */
        };

        /* Scratch-side group of slice keys sharing one name. */
        struct temp_slice_group_node {
            struct string name;
            u64 per_frame_count;
            struct temp_ase_slice_key_node *temp_ase_slice_key_head;
            struct temp_slice_group_node *next;

            struct sprite_sheet_slice_group *final_slice_group;
        };

        /* Group slices by name and find out counts per frame */
        u64 num_temp_slice_group_nodes = 0;
        struct temp_slice_group_node *temp_slice_group_head = NULL;
        {
            struct fixed_dict temp_slice_dict = fixed_dict_init(scratch.arena, (u64)(ase.num_slice_keys * 2));
            for (struct ase_slice_key *ase_slice_key = ase.slice_key_head; ase_slice_key; ase_slice_key = ase_slice_key->next) {
                struct string name = ase_slice_key->name;

                struct temp_slice_group_node *temp_slice_group_node = fixed_dict_get(&temp_slice_dict, name);
                if (!temp_slice_group_node) {
                    /* First key with this name: start a new group. */
                    temp_slice_group_node = arena_push_zero(scratch.arena, struct temp_slice_group_node);
                    temp_slice_group_node->name = name;
                    fixed_dict_set(scratch.arena, &temp_slice_dict, name, temp_slice_group_node);

                    ++num_temp_slice_group_nodes;
                    temp_slice_group_node->next = temp_slice_group_head;
                    temp_slice_group_head = temp_slice_group_node;
                }

                struct temp_ase_slice_key_node *node = arena_push_zero(scratch.arena, struct temp_ase_slice_key_node);
                node->key = ase_slice_key;
                node->next = temp_slice_group_node->temp_ase_slice_key_head;
                node->earliest_frame = U32_MAX; /* To be overwritten later after iterating */

                temp_slice_group_node->temp_ase_slice_key_head = node;

                ++temp_slice_group_node->per_frame_count;
            }
        }

        /* Allocate slice groups & fill originals in 2d array */
        sheet.slice_groups_count = num_temp_slice_group_nodes;
        sheet.slice_groups = arena_push_array_zero(arena, struct sprite_sheet_slice_group, sheet.slice_groups_count);
        sheet.slice_groups_dict = fixed_dict_init(arena, (u64)(num_temp_slice_group_nodes * SHEET_SLICE_LOOKUP_TABLE_BUCKET_RATIO));

        u64 index = 0;
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next) {
            struct sprite_sheet_slice_group *slice_group = &sheet.slice_groups[index];
            slice_group->name = string_copy(arena, temp_slice_group_node->name);
            slice_group->per_frame_count = temp_slice_group_node->per_frame_count;

            /* frame_slices is a row-major [num_frames][per_frame_count]
             * array, manually aligned since it's pushed as raw bytes. */
            arena_align(arena, alignof(struct sprite_sheet_slice));
            slice_group->frame_slices = (struct sprite_sheet_slice *)arena_push_array_zero(arena, u8, (ase.num_frames * slice_group->per_frame_count) * sizeof(struct sprite_sheet_slice));

            u64 index_in_frame = 0;
            for (struct temp_ase_slice_key_node *node = temp_slice_group_node->temp_ase_slice_key_head; node; node = node->next) {
                struct ase_slice_key *key = node->key;

                for (struct ase_slice *ase_slice = key->slice_head; ase_slice; ase_slice = ase_slice->next) {
                    u32 start = ase_slice->start;

                    struct sprite_sheet_slice *slice = &slice_group->frame_slices[(start * slice_group->per_frame_count) + index_in_frame];
                    slice->original = true; /* authored on this frame (vs. propagated below) */

                    /* Pixel-space corners and extent of the slice rect. */
                    f32 x1_px = ase_slice->x1;
                    f32 y1_px = ase_slice->y1;
                    f32 x2_px = ase_slice->x2;
                    f32 y2_px = ase_slice->y2;
                    f32 width_px = x2_px - x1_px;
                    f32 height_px = y2_px - y1_px;

                    /* Normalized coords: relative to frame center, in
                     * frame-size units. */
                    f32 x1 = (x1_px - frame_center.x) / frame_size.x;
                    f32 y1 = (y1_px - frame_center.y) / frame_size.y;
                    f32 x2 = (x2_px - frame_center.x) / frame_size.x;
                    f32 y2 = (y2_px - frame_center.y) / frame_size.y;
                    f32 width = x2 - x1;
                    f32 height = y2 - y1;

                    /* Rect */
                    struct rect rect_px = RECT(x1_px, y1_px, width_px, height_px);
                    struct rect rect = RECT(x1, y1, width, height);
                    /* Center */
                    struct v2 center_px = V2(x1_px + (width_px * 0.5f), y1_px + (height_px * 0.5f));
                    struct v2 center = V2(x1 + (width * 0.5f), y1 + (height * 0.5f));
                    /* Dir: default direction, overwritten below when a
                     * matching ".ray" group exists. NOTE(review): dir_px.x
                     * is set to center_px.x while normalized dir.x is 0 —
                     * confirm the pixel-space default is intentional. */
                    struct v2 dir_px = V2(center_px.x, -1);
                    struct v2 dir = V2(0, -1);

                    slice->rect_px = rect_px;
                    slice->center_px = center_px;
                    slice->dir_px = dir_px;

                    slice->rect = rect;
                    slice->center = center;
                    slice->dir = dir;

                    node->index_in_frame = index_in_frame;
                    if (start < node->earliest_frame) {
                        node->earliest_frame = start;
                    }
                }

                ++index_in_frame;
            }

            temp_slice_group_node->final_slice_group = slice_group;
            fixed_dict_set(arena, &sheet.slice_groups_dict, slice_group->name, slice_group);
            ++index;
        }

        /* Propogate original slices into next frames (and first slices into previous frames) */
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next) {
            struct sprite_sheet_slice_group *slice_group = temp_slice_group_node->final_slice_group;

            for (struct temp_ase_slice_key_node *node = temp_slice_group_node->temp_ase_slice_key_head; node; node = node->next) {
                struct ase_slice_key *key = node->key;
                u32 index_in_frame = node->index_in_frame;
                for (struct ase_slice *ase_slice = key->slice_head; ase_slice; ase_slice = ase_slice->next) {
                    u32 start = ase_slice->start;

                    struct sprite_sheet_slice *slice = &slice_group->frame_slices[(start * slice_group->per_frame_count) + index_in_frame];

                    /* Propogate earliest slice to all previous frames */
                    if (start == node->earliest_frame && start > 0) {
                        for (u32 i = start; i-- > 0;) {
                            struct sprite_sheet_slice *target = &slice_group->frame_slices[(i * slice_group->per_frame_count) + index_in_frame];
                            *target = *slice;
                            target->original = false;
                        }
                    }

                    /* Propogate slice to forward frames until original is found */
                    for (u32 i = start + 1; i < ase.num_frames; ++i) {
                        struct sprite_sheet_slice *target = &slice_group->frame_slices[(i * slice_group->per_frame_count) + index_in_frame];
                        if (target->original) {
                            break;
                        } else {
                            *target = *slice;
                            target->original = false;
                        }
                    }
                }
            }
        }

        /* Calculate dirs: a group named "<name>.ray" supplies a per-frame
         * ray endpoint; every slice in the matching "<name>" group gets
         * dir = ray_end - slice_center. */
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next) {
            struct string ray_suffix = STR(".ray");

            struct sprite_sheet_slice_group *ray_slice_group = temp_slice_group_node->final_slice_group;
            struct string ray_slice_name = ray_slice_group->name;
            if (string_ends_with(ray_slice_name, ray_suffix)) {
                struct string point_slice_name = ray_slice_name;
                point_slice_name.len -= ray_suffix.len;

                struct sprite_sheet_slice_group *point_slice_group = fixed_dict_get(&sheet.slice_groups_dict, point_slice_name);
                if (point_slice_group) {
                    u32 point_slices_per_frame = point_slice_group->per_frame_count;

                    for (u32 i = 0; i < ase.num_frames; ++i) {
                        /* Use ray slice in ray group. NOTE(review): the ray
                         * group is indexed with the POINT group's per-frame
                         * count — equivalent only when the ray group has
                         * one slice per frame; confirm that invariant. */
                        struct sprite_sheet_slice *ray_slice = &ray_slice_group->frame_slices[i * point_slices_per_frame];
                        struct v2 ray_end = ray_slice->center_px;
                        struct v2 ray_end_norm = ray_slice->center;

                        /* Apply to each point slice in point group */
                        for (u32 j = 0; j < point_slices_per_frame; ++j) {
                            struct sprite_sheet_slice *point_slice = &point_slice_group->frame_slices[(i * point_slices_per_frame) + j];
                            point_slice->dir_px = v2_sub(ray_end, point_slice->center_px);
                            point_slice->dir = v2_sub(ray_end_norm, point_slice->center);
                            point_slice->has_ray = true;
                        }
                    }

                }
            }
        }

        scratch_end(scratch);
    }

    return sheet;
}
|
|
|
|
/* Synchronously load the sheet named by `tag` into cache node `n`: decode
 * the .ase data, build the sprite_sheet inside the node arena, and publish
 * state LOADED. On a missing resource the node keeps the sheet pointer set
 * at node init (G.nil_sheet) and only an error is logged. */
INTERNAL void cache_node_load_sheet(struct cache_node *n, struct sprite_tag tag)
{
    __prof;
    struct temp_arena scratch = scratch_begin_no_conflict();

    atomic_u32_eval_exchange(&n->state, CACHE_NODE_STATE_WORKING);
    struct string path = tag.path;

    logf_info("Loading sprite sheet \"%F\"", FMT_STR(path));
    sys_timestamp_t start_ts = sys_timestamp();

    //ASSERT(string_ends_with(path, STR(".ase")));
    ASSERT(n->kind == CACHE_NODE_KIND_SHEET);

    n->arena = arena_alloc(SHEET_ARENA_RESERVE);
    {
        /* Decode */
        struct ase_decode_sheet_result decoded = ZI;
        if (resource_exists(path)) {
            struct resource sheet_rs = resource_open(path);
            decoded = ase_decode_sheet(scratch.arena, sheet_rs.bytes);
#if RESOURCE_RELOADING
            /* Snapshot the on-disk mtime so hot-reload can detect changes. */
            n->initial_resource_file_modified_time = sys_file_get_time(sheet_rs.file).modified;
#endif
            resource_close(sheet_rs);

            /* Initialize */
            n->sheet = arena_push(&n->arena, struct sprite_sheet);
            *n->sheet = init_sheet_from_ase_result(&n->arena, decoded);
            n->sheet->loaded = true;
            n->sheet->valid = true;
        } else {
            logf_error("Sprite \"%F\" not found", FMT_STR(path));
        }
    }
#if RESOURCE_RELOADING
    /* Keep a (possibly truncated) copy of the path for the reload scanner. */
    u64 cpy_len = min_u64(tag.path.len, ARRAY_COUNT(n->tag_path));
    n->tag_path_len = cpy_len;
    MEMCPY(n->tag_path, tag.path.text, cpy_len);
#endif
    arena_set_readonly(&n->arena);
    n->memory_usage = n->arena.committed;
    atomic_u64_eval_add(&G.cache.memory_usage, n->memory_usage);

    logf_info("Finished loading sprite sheet \"%F\" in %F seconds (cache size: %F bytes).",
              FMT_STR(path),
              FMT_FLOAT(sys_timestamp_seconds(sys_timestamp() - start_ts)),
              FMT_UINT(n->memory_usage));

    /* Publish: readers of state LOADED may now dereference n->sheet. */
    atomic_u32_eval_exchange(&n->state, CACHE_NODE_STATE_LOADED);

    scratch_end(scratch);
}
|
|
|
|
/* ========================== *
|
|
* Scope
|
|
* ========================== */
|
|
|
|
/* Ensure `scope` holds a reference to `cache_node`, taking one node
 * refcount the first time only. The scope's reference table reuses the
 * cache's bucket index so each chain to search stays short. */
INTERNAL void scope_ensure_reference(struct sprite_scope *scope, struct cache_node *cache_node, u64 cache_bucket_index)
{
    __prof;
    /* Walk this bucket's reference chain looking for an existing entry;
     * ref_next trails behind so it ends pointing at the tail link slot. */
    struct sprite_scope_reference **ref_next = &scope->reference_buckets[cache_bucket_index];
    struct sprite_scope_reference *ref = *ref_next;
    while (ref) {
        if (ref->cache_node == cache_node) {
            /* Scope already references node */
            break;
        } else {
            ref_next = &ref->next_hash;
            ref = *ref_next;
        }
    }

    if (!ref) {
        /* Increment refcount */
        node_refcount_add(cache_node, 1);
        /* Add reference to scope, recycling from the thread-local free
         * list when possible. */
        struct sprite_tctx *tctx = thread_local_var_eval(&tl_sprite_tctx);
        if (tctx->first_free_reference) {
            ref = tctx->first_free_reference;
            tctx->first_free_reference = ref->next_free;
            MEMZERO_STRUCT(ref);
        } else {
            ref = arena_push_zero(&tctx->arena, struct sprite_scope_reference);
        }
        ref->cache_node = cache_node;
        /* Append at the chain tail found during the walk above. */
        *ref_next = ref;
    }
}
|
|
|
|
/* Open a per-thread sprite scope. A scope pins every cache node looked up
 * through it (via refcounts) until sprite_scope_end. Scope structs and
 * their bucket arrays are recycled through the thread-local free list. */
struct sprite_scope *sprite_scope_begin(void)
{
    struct sprite_tctx *tctx = thread_local_var_eval(&tl_sprite_tctx);

    struct sprite_scope *res = NULL;
    if (tctx->first_free_scope) {
        res = tctx->first_free_scope;
        tctx->first_free_scope = res->next_free;
        /* Clear the recycled bucket array, then reset the scope struct
         * itself while keeping its existing bucket allocation. */
        MEMZERO(res->reference_buckets, sizeof(*res->reference_buckets) * CACHE_BUCKETS_COUNT);
        *res = (struct sprite_scope) {
            .reference_buckets = res->reference_buckets
        };
    } else {
        res = arena_push_zero(&tctx->arena, struct sprite_scope);
        res->reference_buckets = arena_push_array_zero(&tctx->arena, struct sprite_scope_reference *, CACHE_BUCKETS_COUNT);
    }

    return res;
}
|
|
|
|
/* Close a scope: drop every node reference it took (making those nodes
 * evictable once unreferenced elsewhere) and recycle the scope plus its
 * reference records onto the thread-local free lists.
 * NOTE(review): the records come from the opening thread's arena/free
 * lists — confirm scopes are always ended on the thread that began them. */
void sprite_scope_end(struct sprite_scope *scope)
{
    struct sprite_tctx *tctx = thread_local_var_eval(&tl_sprite_tctx);
    for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
        struct sprite_scope_reference *ref = scope->reference_buckets[i];
        while (ref) {
            /* Decrement refcount */
            node_refcount_add(ref->cache_node, -1);
            /* Add reference to free list */
            ref->next_free = tctx->first_free_reference;
            tctx->first_free_reference = ref;
            ref = ref->next_hash;
        }
    }
    scope->next_free = tctx->first_free_scope;
    tctx->first_free_scope = scope;
}
|
|
|
|
/* ========================== *
|
|
* Cache interface
|
|
* ========================== */
|
|
|
|
/* Find the cache node for (tag, kind), creating it if absent, and register
 * it with `scope` so it cannot be evicted while the scope is open. */
INTERNAL struct cache_node *node_lookup_touch(struct sprite_scope *scope, struct sprite_tag tag, enum cache_node_kind kind)
{
    __prof;

    struct cache_node *n = NULL;
    struct cache_node *nonmatching = NULL;       /* chain tail after a failed lookup */
    struct cache_node **nonmatching_next = NULL; /* link slot a new node is spliced into */

    struct cache_node_hash hash = cache_node_hash_from_tag_hash(tag.hash, kind);
    u64 cache_bucket_index = U128_LO64(hash.v) % CACHE_BUCKETS_COUNT;
    struct cache_bucket *bucket = &G.cache.buckets[cache_bucket_index];

    /* Lookup (shared lock: lookups on the same bucket may run in parallel) */
    /* TODO: Spinlock */
    {
        struct sys_lock lock = sys_mutex_lock_s(&bucket->mutex);
        nonmatching_next = &bucket->first;
        n = *nonmatching_next;
        while (n) {
            if (u128_eq(n->hash.v, hash.v)) {
                scope_ensure_reference(scope, n, cache_bucket_index);
                break;
            } else {
                nonmatching = n;
                nonmatching_next = &nonmatching->next_hash;
                n = *nonmatching_next;
            }
        }
        sys_mutex_unlock(&lock);
    }

    /* Allocate new node if necessary.
     * NOTE(review): the shared lock is released between the failed lookup
     * and the exclusive re-lock below, and the chain is not re-scanned —
     * two threads missing concurrently could both insert a node for the
     * same hash, and the saved nonmatching_next may be stale. Confirm
     * whether callers make this window impossible. */
    if (!n) {
        __profscope(node_lookup_allocate);
        struct sys_lock bucket_lock = sys_mutex_lock_e(&bucket->mutex);
        {
            /* Alloc node (recycled from the pool under its own mutex) */
            {
                struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.node_pool_mutex);
                if (G.cache.node_pool_first_free) {
                    n = G.cache.node_pool_first_free;
                    G.cache.node_pool_first_free = n->next_free;
                    MEMZERO_STRUCT(n);
                } else {
                    n = arena_push_zero(&G.cache.arena, struct cache_node);
                }
                sys_mutex_unlock(&pool_lock);
            }
            /* Init node and add to bucket; payload pointers start at the
             * nil fallbacks until a load publishes real data. */
            scope_ensure_reference(scope, n, cache_bucket_index);
            *nonmatching_next = n;
            if (nonmatching) {
                nonmatching->next_hash = n;
                n->prev_hash = nonmatching;
            }
            n->hash = cache_node_hash_from_tag_hash(tag.hash, kind);
            n->kind = kind;
            n->texture = G.nil_texture;
            n->sheet = G.nil_sheet;
        }
        sys_mutex_unlock(&bucket_lock);
    }

    return n;
}
|
|
|
|
/* Shared lookup path for textures and sheets. Returns the loaded payload
 * when available; otherwise the "loading" placeholder — unless `await`, in
 * which case the load runs synchronously on this thread and its result is
 * returned. Exactly one caller wins the NONE->QUEUED transition and
 * becomes responsible for the load; everyone else sees the placeholder
 * until state LOADED is published. */
INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_tag tag, enum cache_node_kind kind, b32 await)
{
    /* TODO: Replace switch statements */
    void *res = NULL;
    switch (kind) {
    case CACHE_NODE_KIND_TEXTURE: { res = G.loading_texture; } break;
    case CACHE_NODE_KIND_SHEET: { res = G.loading_sheet; } break;
    }

    struct cache_node *n = node_lookup_touch(scope, tag, kind);

    u32 state = atomic_u32_eval(&n->state);
    if (state == CACHE_NODE_STATE_LOADED) {
        switch (kind) {
        case CACHE_NODE_KIND_TEXTURE: { res = n->texture; } break;
        case CACHE_NODE_KIND_SHEET: { res = n->sheet; } break;
        }
    } else if (state == CACHE_NODE_STATE_NONE) {
        /* The CAS guarantees only one thread queues or performs the load. */
        if (atomic_u32_eval_compare_exchange(&n->state, CACHE_NODE_STATE_NONE, CACHE_NODE_STATE_QUEUED) == CACHE_NODE_STATE_NONE) {
            /* Node is new, load texture */
            if (await) {
                switch (kind) {
                case CACHE_NODE_KIND_TEXTURE: {
                    cache_node_load_texture(n, tag);
                    res = n->texture;
                } break;
                case CACHE_NODE_KIND_SHEET: {
                    cache_node_load_sheet(n, tag);
                    res = n->sheet;
                } break;
                }
            } else {
                struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);

                /* Allocate cmd (pooled) */
                struct load_cmd *cmd = NULL;
                if (G.first_free_load_cmd) {
                    cmd = G.first_free_load_cmd;
                    G.first_free_load_cmd = cmd->next_free;
                    MEMZERO_STRUCT(cmd);
                } else {
                    cmd = arena_push_zero(&G.load_cmds_arena, struct load_cmd);
                }

                /* Initialize cmd: copy the path bytes into the cmd-owned
                 * buffer, since the caller's string may not outlive the
                 * async load. NOTE(review): paths longer than the buffer
                 * are truncated but cmd->tag.path.len keeps the original
                 * length — confirm this cannot occur / is acceptable. */
                cmd->cache_node = n;
                cmd->tag = tag;
                {
                    u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff));
                    cmd->tag.path.text = cmd->tag_path_buff;
                    MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
                }

                /* Cmd holds reference to node */
                node_refcount_add(n, 1);

                /* Push work */
                work_push_task(&sprite_load_task, cmd, WORK_PRIORITY_NORMAL);

                sys_mutex_unlock(&lock);
            }
        }
    }

    return res;
}
|
|
|
|
/* ========================== *
|
|
* Texture
|
|
* ========================== */
|
|
|
|
struct sprite_texture *sprite_texture_from_tag_await(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
__prof;
|
|
return (struct sprite_texture *)data_from_tag_internal(scope, tag, CACHE_NODE_KIND_TEXTURE, true);
|
|
}
|
|
|
|
struct sprite_texture *sprite_texture_from_tag_async(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
__prof;
|
|
return (struct sprite_texture *)data_from_tag_internal(scope, tag, CACHE_NODE_KIND_TEXTURE, false);
|
|
}
|
|
|
|
/* ========================== *
|
|
* Sheet
|
|
* ========================== */
|
|
|
|
struct sprite_sheet *sprite_sheet_from_tag_await(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
__prof;
|
|
return (struct sprite_sheet *)data_from_tag_internal(scope, tag, CACHE_NODE_KIND_SHEET, true);
|
|
}
|
|
|
|
struct sprite_sheet *sprite_sheet_from_tag_async(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
__prof;
|
|
return (struct sprite_sheet *)data_from_tag_internal(scope, tag, CACHE_NODE_KIND_SHEET, false);
|
|
}
|
|
|
|
struct sprite_sheet_frame sprite_sheet_get_frame(struct sprite_sheet *sheet, u32 index)
{
    __prof;
    /* Out-of-range indices fall back to a full-image frame of 0.1s at
     * index 0, so animation code never dereferences past the array. */
    if (index >= sheet->frames_count) {
        struct sprite_sheet_frame fallback = ZI;
        fallback.index = 0;
        fallback.duration = 0.1;
        fallback.clip = CLIP_ALL;
        return fallback;
    }
    return sheet->frames[index];
}
|
|
|
|
struct sprite_sheet_span sprite_sheet_get_span(struct sprite_sheet *sheet, struct string name)
|
|
{
|
|
__prof;
|
|
struct sprite_sheet_span res = ZI;
|
|
if (sheet->spans_count > 0) {
|
|
struct sprite_sheet_span *entry = fixed_dict_get(&sheet->spans_dict, name);
|
|
if (entry) {
|
|
res = *entry;
|
|
}
|
|
}
|
|
return res;
|
|
}
|
|
|
|
/* Fetch the first slice of group 'name' for 'frame_index'.
 *
 * Fallback chain: unknown name -> the 'pivot' slice; missing 'pivot'
 * -> a synthesized slice centered on the frame.
 * NOTE(review): 'frame_index' is not range-checked against the group's
 * frame count -- TODO confirm callers always pass a valid frame. */
struct sprite_sheet_slice sprite_sheet_get_slice(struct sprite_sheet *sheet, struct string name, u32 frame_index)
{
	if (sheet->slice_groups_count > 0) {
		struct sprite_sheet_slice_group *group = fixed_dict_get(&sheet->slice_groups_dict, name);
		if (group) {
			/* First slice of this group for the requested frame */
			return group->frame_slices[frame_index * group->per_frame_count];
		}
	}

	if (!string_eq(name, STR("pivot"))) {
		/* Named slice missing: fall back to the 'pivot' slice */
		return sprite_sheet_get_slice(sheet, STR("pivot"), frame_index);
	}

	/* 'pivot' itself does not exist: synthesize a frame-centered default */
	struct sprite_sheet_slice res = ZI;
	res.center = V2(0, 0);
	res.center_px = v2_mul(sheet->frame_size, 0.5f);
	res.dir_px = V2(res.center_px.x, 0);
	res.dir = V2(0, -0.5);
	return res;
}
|
|
|
|
/* Fetch all slices of group 'name' for 'frame_index' as a borrowed array
 * view into the sheet's data. Returns an empty array (count 0, NULL
 * pointer) when the group is unknown or the sheet has no slice groups. */
struct sprite_sheet_slice_array sprite_sheet_get_slices(struct sprite_sheet *sheet, struct string name, u32 frame_index)
{
	struct sprite_sheet_slice_array res = ZI;
	if (sheet->slice_groups_count <= 0) {
		return res;
	}
	struct sprite_sheet_slice_group *group = fixed_dict_get(&sheet->slice_groups_dict, name);
	if (group) {
		res.count = group->per_frame_count;
		res.slices = &group->frame_slices[frame_index * group->per_frame_count];
	}
	return res;
}
|
|
|
|
/* ========================== *
|
|
* Load task
|
|
* ========================== */
|
|
|
|
INTERNAL WORK_TASK_FUNC_DEF(sprite_load_task, arg)
|
|
{
|
|
__prof;
|
|
struct load_cmd *cmd = (struct load_cmd *)arg;
|
|
|
|
struct cache_node *n = cmd->cache_node;
|
|
switch (n->kind) {
|
|
case CACHE_NODE_KIND_TEXTURE: {
|
|
cache_node_load_texture(n, cmd->tag);
|
|
} break;
|
|
case CACHE_NODE_KIND_SHEET: {
|
|
cache_node_load_sheet(n, cmd->tag);
|
|
} break;
|
|
}
|
|
|
|
/* Free cmd */
|
|
node_refcount_add(n, -1);
|
|
{
|
|
struct sys_lock lock = sys_mutex_lock_e(&G.load_cmds_mutex);
|
|
cmd->next_free = G.first_free_load_cmd;
|
|
G.first_free_load_cmd = cmd;
|
|
sys_mutex_unlock(&lock);
|
|
}
|
|
}
|
|
|
|
/* ========================== *
|
|
* Evictor thread
|
|
* ========================== */
|
|
|
|
/* Per-candidate record built on the evictor's scratch arena during a scan
 * cycle; links the same node into up to three lists (scan, LRU, evicted). */
struct evict_node {
	b32 force_evict; /* Evict regardless of budget (e.g. backing file changed) */
	struct cache_node_refcount refcount; /* Refcount snapshot taken at scan time */
	struct cache_node *cache_node; /* Cache node under consideration */
	struct cache_bucket *cache_bucket; /* Bucket that owns cache_node */
	struct evict_node *next_consider; /* Link: all candidates found by the scan */
	struct evict_node *next_consider_lru; /* Link: candidates sorted least-recently-used first */
	struct evict_node *next_evicted; /* Link: candidates actually removed from the cache */

};
|
|
|
|
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
|
|
{
|
|
(UNUSED)arg;
|
|
|
|
struct sys_lock evictor_lock = sys_mutex_lock_e(&G.evictor_mutex);
|
|
while (!G.evictor_shutdown) {
|
|
struct temp_arena scratch = scratch_begin_no_conflict();
|
|
struct evict_node *head_consider = NULL;
|
|
struct evict_node *head_consider_lru = NULL;
|
|
struct evict_node *head_evicted = NULL;
|
|
if (!G.evictor_shutdown) {
|
|
u32 cur_cycle = *atomic_u32_raw(&G.evictor_cycle);
|
|
|
|
/* Scan for evictable nodes */
|
|
b32 cache_over_budget = atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET;
|
|
if (cache_over_budget || RESOURCE_RELOADING) {
|
|
__profscope(eviction_scan);
|
|
for (u64 i = 0; i < CACHE_BUCKETS_COUNT; ++i) {
|
|
struct cache_bucket *bucket = &G.cache.buckets[i];
|
|
struct sys_lock bucket_lock = sys_mutex_lock_s(&bucket->mutex);
|
|
{
|
|
struct cache_node *n = bucket->first;
|
|
while (n) {
|
|
b32 consider_for_eviction = false;
|
|
b32 force_evict = false;
|
|
u64 refcount_uncast = atomic_u64_eval(&n->refcount_struct);
|
|
struct cache_node_refcount refcount = *(struct cache_node_refcount *)&refcount_uncast;
|
|
if (refcount.count <= 0) {
|
|
#if RESOURCE_RELOADING
|
|
/* Check if file changed for resource reloading */
|
|
if (!consider_for_eviction) {
|
|
struct string path = string_from_cstr_len((char *)n->tag_path, n->tag_path_len);
|
|
b32 file_changed = false;
|
|
struct sys_datetime current_file_time;
|
|
{
|
|
struct sys_file file = sys_file_open_read(path);
|
|
current_file_time = sys_file_get_time(file).modified;
|
|
sys_file_close(file);
|
|
}
|
|
file_changed = MEMCMP_STRUCT(&n->initial_resource_file_modified_time, ¤t_file_time) != 0;
|
|
if (file_changed) {
|
|
switch (n->kind) {
|
|
case CACHE_NODE_KIND_TEXTURE: {
|
|
logf_info("Resource file for sprite texture \"%F\" has changed. Evicting to allow for reloading.", FMT_STR(path));
|
|
} break;
|
|
case CACHE_NODE_KIND_SHEET: {
|
|
logf_info("Resource file for sprite sheet \"%F\" has changed. Evicting to allow for reloading.", FMT_STR(path));
|
|
} break;
|
|
}
|
|
consider_for_eviction = true;
|
|
force_evict = true;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
#if RESOURCE_RELOADING
|
|
/* Check usage time */
|
|
/* Only check conditional if RESOURCE_RELOADING is enabled,
|
|
* since over-budget is assumed to be true otherwise */
|
|
if (cache_over_budget)
|
|
#endif
|
|
{
|
|
u32 last_used_cycle = refcount.last_modified_cycle;
|
|
f64 time_since_use = (f64)(cur_cycle - last_used_cycle) * EVICTOR_CYCLE_INTERVAL;
|
|
if (time_since_use > EVICTOR_GRACE_PERIOD) {
|
|
/* Cache is over budget and node hasn't been referenced in a while */
|
|
consider_for_eviction = true;
|
|
}
|
|
}
|
|
|
|
}
|
|
|
|
/* Add node to evict list */
|
|
if (consider_for_eviction) {
|
|
struct evict_node *evict_node = arena_push_zero(scratch.arena, struct evict_node);
|
|
evict_node->cache_node = n;
|
|
evict_node->cache_bucket = bucket;
|
|
evict_node->refcount = refcount;
|
|
evict_node->force_evict = force_evict;
|
|
evict_node->next_consider = head_consider;
|
|
head_consider = evict_node;
|
|
}
|
|
|
|
n = n->next_hash;
|
|
}
|
|
}
|
|
sys_mutex_unlock(&bucket_lock);
|
|
}
|
|
}
|
|
|
|
/* Sort evict nodes by usage time */
|
|
if (head_consider) {
|
|
/* TODO: Optimize sort if necessary. Currently O(n^2). */
|
|
__profscope(eviction_sort);
|
|
for (struct evict_node *en = head_consider; en; en = en->next_consider) {
|
|
u32 last_modified_cycle = en->refcount.last_modified_cycle;
|
|
struct evict_node *prev = NULL;
|
|
struct evict_node *next = head_consider_lru;
|
|
while (next && !(last_modified_cycle <= next->refcount.last_modified_cycle || en->force_evict)) {
|
|
prev = next;
|
|
next = next->next_consider_lru;
|
|
}
|
|
if (prev) {
|
|
prev->next_consider_lru = en;
|
|
} else {
|
|
head_consider_lru = en;
|
|
}
|
|
en->next_consider_lru = next;
|
|
}
|
|
}
|
|
|
|
/* Remove evictable nodes from cache table until under budget */
|
|
if (head_consider_lru) {
|
|
__profscope(eviction_cache_removal);
|
|
b32 stop_evicting = false;
|
|
for (struct evict_node *en = head_consider_lru; en && !stop_evicting; en = en->next_consider_lru) {
|
|
struct cache_bucket *bucket = en->cache_bucket;
|
|
struct cache_node *n = en->cache_node;
|
|
struct sys_lock bucket_lock = sys_mutex_lock_e(&bucket->mutex);
|
|
{
|
|
struct cache_node_refcount refcount = *(struct cache_node_refcount *)atomic_u64_raw(&n->refcount_struct);
|
|
if (refcount.count > 0 || ((refcount.last_modified_cycle != en->refcount.last_modified_cycle) && !en->force_evict)) {
|
|
/* Cache node has been referenced since scan, skip eviction. */
|
|
} else if (en->force_evict || atomic_u64_eval(&G.cache.memory_usage) > CACHE_MEMORY_BUDGET) {
|
|
/* Remove from cache bucket */
|
|
if (n->prev_hash) {
|
|
n->prev_hash->next_hash = n->next_hash;
|
|
} else {
|
|
bucket->first = n->next_hash;
|
|
}
|
|
if (n->next_hash) {
|
|
n->next_hash->prev_hash = n->prev_hash;
|
|
}
|
|
atomic_u64_eval_add(&G.cache.memory_usage, -((i64)n->memory_usage));
|
|
/* Add to evicted list */
|
|
en->next_evicted = head_evicted;
|
|
head_evicted = en;
|
|
} else {
|
|
/* Cache is no longer over budget or force evicting, stop iteration */
|
|
stop_evicting = true;
|
|
}
|
|
}
|
|
sys_mutex_unlock(&bucket_lock);
|
|
}
|
|
}
|
|
|
|
if (head_evicted) {
|
|
/* Release evicted node memory */
|
|
{
|
|
__profscope(eviction_memory_release);
|
|
for (struct evict_node *en = head_evicted; en; en = en->next_evicted) {
|
|
struct cache_node *n = en->cache_node;
|
|
if (n->kind == CACHE_NODE_KIND_TEXTURE && n->texture->valid) {
|
|
renderer_texture_release(n->texture->renderer_handle);
|
|
}
|
|
arena_release(&n->arena);
|
|
}
|
|
}
|
|
|
|
/* Add evicted nodes to free list */
|
|
{
|
|
__profscope(eviction_free_list_append);
|
|
struct sys_lock pool_lock = sys_mutex_lock_e(&G.cache.node_pool_mutex);
|
|
for (struct evict_node *en = head_evicted; en; en = en->next_evicted) {
|
|
struct cache_node *n = en->cache_node;
|
|
n->next_free = G.cache.node_pool_first_free;
|
|
G.cache.node_pool_first_free = n;
|
|
}
|
|
sys_mutex_unlock(&pool_lock);
|
|
}
|
|
}
|
|
}
|
|
atomic_u32_inc_eval(&G.evictor_cycle);
|
|
scratch_end(scratch);
|
|
|
|
/* Wait */
|
|
sys_condition_variable_wait_time(&G.evictor_cv, &evictor_lock, EVICTOR_CYCLE_INTERVAL);
|
|
}
|
|
sys_mutex_unlock(&evictor_lock);
|
|
}
|