1372 lines
51 KiB
C
1372 lines
51 KiB
C
#include "sprite.h"
|
|
#include "arena.h"
|
|
#include "log.h"
|
|
#include "sys.h"
|
|
#include "resource.h"
|
|
#include "ase.h"
|
|
#include "util.h"
|
|
#include "atomic.h"
|
|
#include "app.h"
|
|
#include "gp.h"
|
|
#include "math.h"
|
|
#include "rand.h"
|
|
#include "snc.h"
|
|
|
|
/* The evictor will begin evicting once cache usage is > threshold.
 * It will evict entries until the budget has shrunk < target. */
|
|
#define CACHE_MEMORY_BUDGET_THRESHOLD (MEBI(256))
|
|
#define CACHE_MEMORY_BUDGET_TARGET (MEBI(128))
|
|
STATIC_ASSERT(CACHE_MEMORY_BUDGET_THRESHOLD >= CACHE_MEMORY_BUDGET_TARGET);
|
|
|
|
#define CACHE_BINS_COUNT 1024
|
|
|
|
#define MAX_SCOPE_REFERENCES 1024
|
|
|
|
/* How long between evictor cycles */
|
|
#define EVICTOR_CYCLE_INTERVAL_NS NS_FROM_SECONDS(0.500)
|
|
|
|
/* How many cycles a cache entry spends unused until it's considered evictable */
|
|
#define EVICTOR_GRACE_PERIOD_CYCLES (NS_FROM_SECONDS(10.000) / EVICTOR_CYCLE_INTERVAL_NS)
|
|
|
|
/* Texture arena only used to store texture struct at the moment. Actual image data is allocated on GPU. */
|
|
#define TEXTURE_ARENA_RESERVE MEBI(1)
|
|
|
|
#define SHEET_ARENA_RESERVE MEBI(64)
|
|
#define SHEET_SPAN_LOOKUP_TABLE_BIN_RATIO 2.0
|
|
#define SHEET_SLICE_LOOKUP_TABLE_BIN_RATIO 2.0
|
|
|
|
/* ========================== *
|
|
* Cache structs
|
|
* ========================== */
|
|
|
|
/* Kind of payload stored in a cache entry (decoded texture vs. sheet metadata). */
enum cache_entry_kind {
    CACHE_ENTRY_KIND_TEXTURE,
    CACHE_ENTRY_KIND_SHEET,

    NUM_CACHE_ENTRY_KINDS
};
|
|
|
|
/* Lifecycle of a cache entry. Advances NONE -> QUEUED -> WORKING -> LOADED
 * via atomic transitions on cache_entry.state (ordering matters for lookups,
 * which prefer the most-advanced state). */
enum cache_entry_state {
    CACHE_ENTRY_STATE_NONE,    /* Freshly allocated; no load requested yet */
    CACHE_ENTRY_STATE_QUEUED,  /* A load cmd has been pushed; waiting for a worker */
    CACHE_ENTRY_STATE_WORKING, /* A load job is currently decoding the resource */
    CACHE_ENTRY_STATE_LOADED   /* Entry data is finalized and safe to read */
};
|
|
|
|
/* Reference-count state packed into a single 64-bit word so both fields can be
 * updated in one atomic CAS (see refcount_add). */
struct cache_refcount {
    i32 count;          /* Number of scopes currently holding a reference to this entry */
    i32 last_ref_cycle; /* Last evictor cycle that the refcount was modified */
};
STATIC_ASSERT(sizeof(struct cache_refcount) == 8); /* Must fit into 64 bit atomic */
|
|
|
|
/* 64-bit identity of a cache entry, derived from the tag hash mixed with the
 * entry kind (see cache_entry_hash_from_tag_hash). Also selects the bin. */
struct cache_entry_hash {
    u64 v;
};
|
|
|
|
/* One cached sprite resource. Lives in a hash bin (doubly linked) while
 * active, and on the cache's free list after eviction. */
struct cache_entry {
    enum cache_entry_kind kind;
    struct cache_entry_hash hash;
    struct atomic32 state;                  /* enum cache_entry_state, advanced atomically */
    struct atomic64_padded refcount_struct; /* Cast fetched result to `cache_refcount` */

    /* Allocated data */
    /* NOTE: This data is finalized once entry state = loaded */
    i64 load_time_ns;             /* When the load completed; used to pick the newest duplicate */
    u64 memory_usage;             /* Bytes this entry contributes to the cache budget */
    struct arena *arena;          /* Backing storage for texture/sheet data */
    struct sprite_texture *texture; /* Valid when kind == TEXTURE (nil_texture until loaded) */
    struct sprite_sheet *sheet;     /* Valid when kind == SHEET (nil_sheet until loaded) */

    /* Hash list */
    struct cache_entry *next_in_bin;
    struct cache_entry *prev_in_bin;

    /* Free list */
    struct cache_entry *next_free;

#if RESOURCE_RELOADING
    struct atomic32 out_of_date; /* Has the resource changed since this entry was loaded */
#endif
};
|
|
|
|
/* One hash bin of the cache: a mutex-guarded doubly linked list of entries
 * whose hash maps to this bin. */
struct cache_bin {
    struct snc_mutex mutex;
    struct cache_entry *first;
    struct cache_entry *last;
};
|
|
|
|
/* The sprite cache: fixed array of bins plus a pooled free list of recycled
 * entry structs. memory_usage tracks total bytes for the evictor's budget. */
struct cache {
    struct atomic64_padded memory_usage; /* Sum of all entries' memory_usage */
    struct arena *arena;                 /* Backing storage for bins and entry structs */
    struct cache_bin *bins;              /* CACHE_BINS_COUNT bins */
    struct snc_mutex entry_pool_mutex;   /* Guards entry_pool_first_free */
    struct cache_entry *entry_pool_first_free;
};
|
|
|
|
/* Represents a reference that can be used to safely access cache entry without it becoming evicted during the reference's lifetime */
struct cache_ref {
    struct cache_entry *e;
};
|
|
|
|
/* A cache reference whose lifetime is bound to the scope it was retrieved from.
 * Nodes live in the scope's fixed pool and chain into the scope's per-bin lists. */
struct sprite_scope_cache_ref {
    struct cache_ref ref;
    struct sprite_scope_cache_ref *next_in_bin;
};
|
|
|
|
/* ========================== *
|
|
* Load cmd structs
|
|
* ========================== */
|
|
|
|
/* Work item handed to sprite_load_job. Owns a scope so the target entry
 * cannot be evicted while the async load is in flight, and carries its own
 * copy of the tag path (the caller's string may not outlive the job). */
struct load_cmd {
    struct load_cmd *next_free;  /* Free-list link when recycled */
    struct sprite_scope *scope;  /* Scope pinning `ref` for the job's duration */
    struct cache_ref ref;        /* Entry being loaded */
    struct sprite_tag tag;       /* tag.path.text points into tag_path_buff below */
    u8 tag_path_buff[512];       /* Owned copy of the tag path bytes */
};
|
|
|
|
/* ========================== *
|
|
* Global state
|
|
* ========================== */
|
|
|
|
/* Module-global state for the sprite system (initialized in sprite_startup). */
GLOBAL struct {
    struct arena *perm_arena;               /* Read-only after startup */
    struct sprite_texture *nil_texture;     /* Purple/black "missing" texture, loaded=1 */
    struct sprite_texture *loading_texture; /* Placeholder while a texture load is in flight */
    struct sprite_sheet *nil_sheet;         /* Placeholder sheet, loaded=1 */
    struct sprite_sheet *loading_sheet;     /* Placeholder while a sheet load is in flight */

    /* Cache */
    struct cache cache;

    /* Load cmds */
    struct snc_mutex load_cmds_mutex;       /* Guards the load cmd free list/arena */
    struct arena *load_cmds_arena;
    struct load_cmd *first_free_load_cmd;

    /* Scopes */
    struct snc_mutex scopes_mutex;          /* Guards the scope free list/arena */
    struct arena *scopes_arena;
    struct sprite_scope *first_free_scope;

    /* Evictor */
    struct atomic32_padded evictor_cycle;   /* Monotonic cycle counter, read by refcount_add */
    struct snc_counter shutdown_counter;    /* Signaled when the evictor job exits */
    b32 evictor_scheduler_shutdown;         /* Guarded by evictor_scheduler_mutex */
    struct snc_mutex evictor_scheduler_mutex;
    struct snc_cv evictor_scheduler_shutdown_cv;
} G = ZI, DEBUG_ALIAS(G, G_sprite);
|
|
|
|
/* ========================== *
|
|
* Purple-black image
|
|
* ========================== */
|
|
|
|
/* Build a width x height RGBA checkerboard of purple and black 4x4-pixel
 * blocks (the "missing texture" pattern). Pixel storage is allocated from
 * `arena`; the returned image points into it. */
INTERNAL struct image_rgba generate_purple_black_image(struct arena *arena, u32 width, u32 height)
{
    u32 *pixels = arena_push_array_no_zero(arena, u32, width * height);

    /* Create texture containing alternating blocks of purple and black */
    u32 block_dim = 4;
    u32 purple = 0xFFDC00FF;
    u32 black = 0xFF000000;
    for (u32 y = 0; y < height; ++y) {
        for (u32 x = 0; x < width; ++x) {
            /* Blocks whose block-coordinates share parity are purple */
            b32 same_parity = (((x / block_dim) + (y / block_dim)) % 2 == 0);
            pixels[x + width * y] = same_parity ? purple : black;
        }
    }

    struct image_rgba res = ZI;
    res.width = width;
    res.height = height;
    res.pixels = pixels;
    return res;
}
|
|
|
|
/* ========================== *
|
|
* Startup
|
|
* ========================== */
|
|
|
|
INTERNAL SYS_EXIT_FUNC(sprite_shutdown);
|
|
INTERNAL SYS_JOB_DEF(sprite_load_job, arg);
|
|
INTERNAL SYS_JOB_DEF(sprite_evictor_job, _);
|
|
|
|
#if RESOURCE_RELOADING
|
|
INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, info);
|
|
#endif
|
|
|
|
/* One-time module initialization: builds the placeholder ("loading"/"nil")
 * textures and sheets, allocates the cache/load-cmd/scope arenas, starts the
 * background evictor job, and registers shutdown + resource-watch hooks.
 * Must run before any other sprite_* call. */
struct sprite_startup_receipt sprite_startup(void)
{
    __prof;
    G.perm_arena = arena_alloc(MEBI(1));
    {
        /* Init loading texture (zero-initialized: loaded=0 so callers see "in progress") */
        G.loading_texture = arena_push(G.perm_arena, struct sprite_texture);

        /* Init nil texture: marked loaded, backed by the purple/black pattern */
        G.nil_texture = arena_push(G.perm_arena, struct sprite_texture);
        G.nil_texture->loaded = 1;
        {
            struct arena_temp scratch = scratch_begin_no_conflict();
            struct image_rgba purple_black_image = generate_purple_black_image(scratch.arena, 64, 64);
            G.nil_texture->gp_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2I32(purple_black_image.width, purple_black_image.height), purple_black_image.pixels);
            scratch_end(scratch);
        }

        /* Init loading sheet (loaded stays 0) */
        G.loading_sheet = arena_push(G.perm_arena, struct sprite_sheet);
        G.loading_sheet->image_size = V2(IMAGE_PIXELS_PER_UNIT, IMAGE_PIXELS_PER_UNIT);
        G.loading_sheet->frame_size = V2(IMAGE_PIXELS_PER_UNIT, IMAGE_PIXELS_PER_UNIT);

        /* Init nil sheet */
        G.nil_sheet = arena_push(G.perm_arena, struct sprite_sheet);
        G.nil_sheet->image_size = V2(IMAGE_PIXELS_PER_UNIT, IMAGE_PIXELS_PER_UNIT);
        G.nil_sheet->frame_size = V2(IMAGE_PIXELS_PER_UNIT, IMAGE_PIXELS_PER_UNIT);
        G.nil_sheet->loaded = 1;
    }
    /* Placeholders are immutable from here on */
    arena_set_readonly(G.perm_arena);

    G.cache.arena = arena_alloc(GIBI(64));
    G.cache.bins = arena_push_array(G.cache.arena, struct cache_bin, CACHE_BINS_COUNT);

    G.load_cmds_arena = arena_alloc(GIBI(64));

    G.scopes_arena = arena_alloc(GIBI(64));

    /* Start the evictor; shutdown_counter is signaled when it exits */
    sys_run(1, sprite_evictor_job, 0, SYS_POOL_BACKGROUND, SYS_PRIORITY_LOW, &G.shutdown_counter);

    sys_on_exit(&sprite_shutdown);
    resource_register_watch_callback(&sprite_resource_watch_callback);

    return (struct sprite_startup_receipt) { 0 };
}
|
|
|
|
/* Exit hook: asks the evictor job to stop (flag + CV broadcast under the
 * scheduler mutex) and blocks until it has actually exited. */
INTERNAL SYS_EXIT_FUNC(sprite_shutdown)
{
    __prof;
    /* Signal evictor shutdown */
    {
        struct snc_lock lock = snc_lock_e(&G.evictor_scheduler_mutex);
        G.evictor_scheduler_shutdown = 1;
        /* I32_MAX: wake every waiter on the CV */
        snc_cv_signal(&G.evictor_scheduler_shutdown_cv, I32_MAX);
        snc_unlock(&lock);
    }
    /* Wait for evictor shutdown */
    snc_counter_wait(&G.shutdown_counter);
}
|
|
|
|
/* ========================== *
|
|
* Tag
|
|
* ========================== */
|
|
|
|
struct sprite_tag sprite_tag_from_path(struct string path)
|
|
{
|
|
struct sprite_tag res = ZI;
|
|
res.hash = hash_fnv64(HASH_FNV64_BASIS, path);
|
|
res.path = path;
|
|
return res;
|
|
}
|
|
|
|
/* A tag is considered nil when its hash is zero. */
b32 sprite_tag_is_nil(struct sprite_tag tag)
{
    b32 is_nil = (tag.hash == 0);
    return is_nil;
}
|
|
|
|
/* Tags compare equal by hash alone; the path text is not inspected. */
b32 sprite_tag_eq(struct sprite_tag t1, struct sprite_tag t2)
{
    b32 equal = (t1.hash == t2.hash);
    return equal;
}
|
|
|
|
/* Mix the tag hash with the entry kind so the same resource cached as
 * different kinds (texture vs. sheet) gets distinct cache identities. */
INTERNAL struct cache_entry_hash cache_entry_hash_from_tag_hash(u64 tag_hash, enum cache_entry_kind kind)
{
    struct cache_entry_hash hash = ZI;
    hash.v = rand_u64_from_seed(tag_hash + kind);
    return hash;
}
|
|
|
|
/* ========================== *
|
|
* Load
|
|
* ========================== */
|
|
|
|
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_ref(struct sprite_scope *scope, struct cache_ref ref);
|
|
INTERNAL void push_load_job(struct cache_ref ref, struct sprite_tag tag)
|
|
{
|
|
struct load_cmd *cmd = 0;
|
|
{
|
|
struct snc_lock lock = snc_lock_e(&G.load_cmds_mutex);
|
|
if (G.first_free_load_cmd) {
|
|
cmd = G.first_free_load_cmd;
|
|
G.first_free_load_cmd = cmd->next_free;
|
|
} else {
|
|
cmd = arena_push_no_zero(G.load_cmds_arena, struct load_cmd);
|
|
}
|
|
snc_unlock(&lock);
|
|
}
|
|
MEMZERO_STRUCT(cmd);
|
|
|
|
/* Initialize cmd */
|
|
cmd->scope = sprite_scope_begin();
|
|
cmd->ref = scope_ensure_ref_from_ref(cmd->scope, ref)->ref;
|
|
cmd->tag = tag;
|
|
{
|
|
u64 copy_len = min_u64(tag.path.len, countof(cmd->tag_path_buff));
|
|
cmd->tag.path.text = cmd->tag_path_buff;
|
|
MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
|
|
}
|
|
|
|
/* Push work */
|
|
sys_run(1, sprite_load_job, cmd, SYS_POOL_BACKGROUND, SYS_PRIORITY_INHERIT, 0);
|
|
}
|
|
|
|
/* Worker-side texture load: decodes the .ase at tag.path, uploads it to the
 * GPU, and publishes the entry (state NONE/QUEUED -> WORKING -> LOADED).
 * On decode failure the entry still transitions to LOADED with e->texture
 * left as the nil placeholder set at allocation time. */
INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag tag)
{
    __prof;
    struct arena_temp scratch = scratch_begin_no_conflict();
    struct cache_entry *e = ref.e;

    atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING);
    struct string path = tag.path;

    logf_info("Loading sprite texture [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path));
    b32 success = 0;
    i64 start_ns = sys_time_ns();

    ASSERT(string_ends_with(path, LIT(".ase")));
    ASSERT(e->kind == CACHE_ENTRY_KIND_TEXTURE);

    /* TODO: Replace arena allocs w/ buddy allocator */
    /* TODO: Arena probably overkill. Just using it to store texture struct. */
    e->arena = arena_alloc(TEXTURE_ARENA_RESERVE);
    u64 memory_size = 0;
    {
        /* Decode */
        struct ase_decode_image_result decoded = ZI;
        {
            struct resource texture_rs = resource_open(path);
            if (resource_exists(&texture_rs)) {
                decoded = ase_decode_image(scratch.arena, resource_get_data(&texture_rs));
            } else {
                logf_error("Sprite texture for \"%F\" not found", FMT_STR(path));
            }
            resource_close(&texture_rs);
        }

        if (decoded.success) {
            /* Initialize; the arena only holds the texture struct, pixel
             * data is uploaded to the GPU immediately */
            e->texture = arena_push(e->arena, struct sprite_texture);
            e->texture->width = decoded.image.width;
            e->texture->height = decoded.image.height;
            e->texture->valid = 1;
            e->texture->loaded = 1;
            e->texture->gp_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB, 0, V2I32(decoded.image.width, decoded.image.height), decoded.image.pixels);
            /* TODO: Query gpu for more accurate texture size in VRAM */
            memory_size += (decoded.image.width * decoded.image.height) * sizeof(*decoded.image.pixels);
            success = 1;
        }
    }
    arena_set_readonly(e->arena);
    /* Account CPU arena + estimated VRAM toward the evictor's budget */
    e->memory_usage = e->arena->committed + memory_size;
    atomic64_fetch_add(&G.cache.memory_usage.v, e->memory_usage);

    if (success) {
        logf_success("Loaded sprite texture [%F] \"%F\" in %F seconds (cache size: %F bytes).",
            FMT_HEX(e->hash.v),
            FMT_STR(path),
            FMT_FLOAT(SECONDS_FROM_NS(sys_time_ns() - start_ns)),
            FMT_UINT(e->memory_usage));
    }

    /* Publish: readers may use e->texture from here on */
    atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED);

#if RESOURCE_RELOADING
    /* Mark older entries with the same hash stale and stamp our load time,
     * under the bin lock so lookups see a consistent view */
    struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
    struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
    {
        for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
            if (old_entry != e && old_entry->hash.v == e->hash.v) {
                atomic32_fetch_set(&old_entry->out_of_date, 1);
            }
        }
        e->load_time_ns = sys_time_ns();
    }
    snc_unlock(&bin_lock);
#endif

    scratch_end(scratch);
}
|
|
|
|
/* Build a sprite_sheet from a decoded .ase sheet: frames with normalized clip
 * rects, named animation spans (dict-indexed), and per-frame slice groups.
 * Slices defined on one frame are propagated forward until the next original
 * key, and the earliest key is propagated backward to frame 0. Slice groups
 * named "<name>.ray" donate direction vectors to the group named "<name>".
 * All returned data is allocated from `arena`. */
INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, struct ase_decode_sheet_result ase)
{
    __prof;
    struct sprite_sheet sheet = ZI;

    ASSERT(ase.num_frames >= 1);

    struct v2 frame_size = ase.frame_size;
    struct v2 frame_center = v2_mul(ase.frame_size, 0.5f);

    /* Init frames */
    {
        __profn("Init frames");
        sheet.image_size = ase.image_size;
        sheet.frame_size = ase.frame_size;
        sheet.frames = arena_push_array(arena, struct sprite_sheet_frame, ase.num_frames);
        sheet.frames_count = ase.num_frames;
        for (struct ase_frame *ase_frame = ase.frame_head; ase_frame; ase_frame = ase_frame->next) {
            u32 index = ase_frame->index;

            /* Clip rect in normalized [0,1] image coordinates */
            struct v2 clip_p1 = { (f32)ase_frame->x1 / (f32)ase.image_size.x, (f32)ase_frame->y1 / (f32)ase.image_size.y };
            struct v2 clip_p2 = { (f32)ase_frame->x2 / (f32)ase.image_size.x, (f32)ase_frame->y2 / (f32)ase.image_size.y };

            sheet.frames[index] = (struct sprite_sheet_frame) {
                .index = index,
                .duration = ase_frame->duration,
                .clip = (struct clip_rect) { clip_p1, clip_p2 }
            };
        }
    }

    /* Init spans: named frame ranges, looked up by FNV-64 of the name */
    sheet.spans_count = ase.num_spans;
    if (ase.num_spans > 0) {
        __profn("Init spans");
        sheet.spans = arena_push_array(arena, struct sprite_sheet_span, sheet.spans_count);
        sheet.spans_dict = dict_init(arena, (u64)(ase.num_spans * SHEET_SPAN_LOOKUP_TABLE_BIN_RATIO));
        u64 index = 0;
        for (struct ase_span *ase_span = ase.span_head; ase_span; ase_span = ase_span->next) {
            struct string name = string_copy(arena, ase_span->name);
            struct sprite_sheet_span *span = &sheet.spans[index];
            span->name = name;
            span->start = ase_span->start;
            span->end = ase_span->end;
            u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
            dict_set(arena, sheet.spans_dict, hash, (u64)span);
            ++index;
        }
    }

    /* Init slices */
    if (ase.num_slice_keys > 0) {
        __profn("Init slices");
        struct arena_temp scratch = scratch_begin(arena);

        /* Scratch node wrapping one ase slice key within a group */
        struct temp_ase_slice_key_node {
            struct ase_slice_key *key;
            struct temp_ase_slice_key_node *next;

            u32 index_in_frame;  /* Slot of this key within its group's per-frame slices */
            u32 earliest_frame;  /* Smallest start frame seen for this key */
        };

        /* Scratch grouping of slice keys that share a name */
        struct temp_slice_group_node {
            struct string name;
            u64 per_frame_count; /* How many slices this group has per frame */
            struct temp_ase_slice_key_node *temp_ase_slice_key_head;
            struct temp_slice_group_node *next;

            struct sprite_sheet_slice_group *final_slice_group;
        };

        /* Group slices by name and find out counts per frame */
        u64 num_temp_slice_group_nodes = 0;
        struct temp_slice_group_node *temp_slice_group_head = 0;
        {
            struct dict *temp_slice_dict = dict_init(scratch.arena, (u64)(ase.num_slice_keys * 2));
            for (struct ase_slice_key *ase_slice_key = ase.slice_key_head; ase_slice_key; ase_slice_key = ase_slice_key->next) {
                struct string name = ase_slice_key->name;
                u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
                struct temp_slice_group_node *temp_slice_group_node = (struct temp_slice_group_node *)dict_get(temp_slice_dict, hash);
                if (!temp_slice_group_node) {
                    /* First key with this name: start a new group */
                    temp_slice_group_node = arena_push(scratch.arena, struct temp_slice_group_node);
                    temp_slice_group_node->name = name;
                    dict_set(scratch.arena, temp_slice_dict, hash, (u64)temp_slice_group_node);

                    ++num_temp_slice_group_nodes;
                    temp_slice_group_node->next = temp_slice_group_head;
                    temp_slice_group_head = temp_slice_group_node;
                }

                struct temp_ase_slice_key_node *node = arena_push(scratch.arena, struct temp_ase_slice_key_node);
                node->key = ase_slice_key;
                node->next = temp_slice_group_node->temp_ase_slice_key_head;
                node->earliest_frame = U32_MAX; /* To be overwritten later after iterating */

                temp_slice_group_node->temp_ase_slice_key_head = node;

                ++temp_slice_group_node->per_frame_count;
            }
        }

        /* Allocate slice groups & fill originals in 2d array
         * (frame-major: frame_slices[frame * per_frame_count + slot]) */
        sheet.slice_groups_count = num_temp_slice_group_nodes;
        sheet.slice_groups = arena_push_array(arena, struct sprite_sheet_slice_group, sheet.slice_groups_count);
        sheet.slice_groups_dict = dict_init(arena, (u64)(num_temp_slice_group_nodes * SHEET_SLICE_LOOKUP_TABLE_BIN_RATIO));

        u64 index = 0;
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next) {
            struct sprite_sheet_slice_group *slice_group = &sheet.slice_groups[index];
            slice_group->name = string_copy(arena, temp_slice_group_node->name);
            slice_group->per_frame_count = temp_slice_group_node->per_frame_count;

            slice_group->frame_slices = arena_push_array(arena, struct sprite_sheet_slice, ase.num_frames * slice_group->per_frame_count);

            u64 index_in_frame = 0;
            for (struct temp_ase_slice_key_node *node = temp_slice_group_node->temp_ase_slice_key_head; node; node = node->next) {
                struct ase_slice_key *key = node->key;

                for (struct ase_slice *ase_slice = key->slice_head; ase_slice; ase_slice = ase_slice->next) {
                    u32 start = ase_slice->start;

                    struct sprite_sheet_slice *slice = &slice_group->frame_slices[(start * slice_group->per_frame_count) + index_in_frame];
                    slice->original = 1; /* Authored on this frame (vs. propagated copy) */

                    /* Pixel-space corners and size */
                    f32 x1_px = ase_slice->x1;
                    f32 y1_px = ase_slice->y1;
                    f32 x2_px = ase_slice->x2;
                    f32 y2_px = ase_slice->y2;
                    f32 width_px = x2_px - x1_px;
                    f32 height_px = y2_px - y1_px;

                    /* Frame-relative coordinates: origin at frame center, unit = frame size */
                    f32 x1 = (x1_px - frame_center.x) / frame_size.x;
                    f32 y1 = (y1_px - frame_center.y) / frame_size.y;
                    f32 x2 = (x2_px - frame_center.x) / frame_size.x;
                    f32 y2 = (y2_px - frame_center.y) / frame_size.y;
                    f32 width = x2 - x1;
                    f32 height = y2 - y1;

                    /* Rect */
                    struct rect rect_px = RECT(x1_px, y1_px, width_px, height_px);
                    struct rect rect = RECT(x1, y1, width, height);
                    /* Center */
                    struct v2 center_px = V2(x1_px + (width_px * 0.5f), y1_px + (height_px * 0.5f));
                    struct v2 center = V2(x1 + (width * 0.5f), y1 + (height * 0.5f));
                    /* Dir: default direction until a ".ray" group overrides it below.
                     * NOTE(review): dir_px uses center_px.x for x while the normalized
                     * dir uses 0 — asymmetry looks unintentional; confirm. */
                    struct v2 dir_px = V2(center_px.x, -1);
                    struct v2 dir = V2(0, -1);

                    slice->rect_px = rect_px;
                    slice->center_px = center_px;
                    slice->dir_px = dir_px;

                    slice->rect = rect;
                    slice->center = center;
                    slice->dir = dir;

                    node->index_in_frame = index_in_frame;
                    if (start < node->earliest_frame) {
                        node->earliest_frame = start;
                    }
                }

                ++index_in_frame;
            }

            temp_slice_group_node->final_slice_group = slice_group;
            u64 hash = hash_fnv64(HASH_FNV64_BASIS, slice_group->name);
            dict_set(arena, sheet.slice_groups_dict, hash, (u64)slice_group);
            ++index;
        }

        /* Propagate original slices into next frames (and first slices into previous frames) */
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next) {
            struct sprite_sheet_slice_group *slice_group = temp_slice_group_node->final_slice_group;

            for (struct temp_ase_slice_key_node *node = temp_slice_group_node->temp_ase_slice_key_head; node; node = node->next) {
                struct ase_slice_key *key = node->key;
                u32 index_in_frame = node->index_in_frame;
                for (struct ase_slice *ase_slice = key->slice_head; ase_slice; ase_slice = ase_slice->next) {
                    u32 start = ase_slice->start;

                    struct sprite_sheet_slice *slice = &slice_group->frame_slices[(start * slice_group->per_frame_count) + index_in_frame];

                    /* Propagate earliest slice to all previous frames */
                    if (start == node->earliest_frame && start > 0) {
                        for (u32 i = start; i-- > 0;) {
                            struct sprite_sheet_slice *target = &slice_group->frame_slices[(i * slice_group->per_frame_count) + index_in_frame];
                            *target = *slice;
                            target->original = 0;
                        }
                    }

                    /* Propagate slice to forward frames until original is found */
                    for (u32 i = start + 1; i < ase.num_frames; ++i) {
                        struct sprite_sheet_slice *target = &slice_group->frame_slices[(i * slice_group->per_frame_count) + index_in_frame];
                        if (target->original) {
                            break;
                        } else {
                            *target = *slice;
                            target->original = 0;
                        }
                    }
                }
            }
        }

        /* Calculate direction vectors: a group "<name>.ray" supplies, per frame,
         * a target point; every slice in group "<name>" gets dir = ray - center */
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next) {
            struct string ray_suffix = LIT(".ray");

            struct sprite_sheet_slice_group *ray_slice_group = temp_slice_group_node->final_slice_group;
            struct string ray_slice_name = ray_slice_group->name;
            if (string_ends_with(ray_slice_name, ray_suffix)) {
                /* Strip ".ray" to find the point group this ray drives */
                struct string point_slice_name = ray_slice_name;
                point_slice_name.len -= ray_suffix.len;
                u64 hash = hash_fnv64(HASH_FNV64_BASIS, point_slice_name);
                struct sprite_sheet_slice_group *point_slice_group = (struct sprite_sheet_slice_group *)dict_get(sheet.slice_groups_dict, hash);
                if (point_slice_group) {
                    u32 point_slices_per_frame = point_slice_group->per_frame_count;

                    for (u32 i = 0; i < ase.num_frames; ++i) {
                        /* Use ray slice in ray group.
                         * NOTE(review): indexes the ray group's slices with the POINT
                         * group's per-frame stride — only equivalent when the ray
                         * group has the same per_frame_count; confirm. */
                        struct sprite_sheet_slice *ray_slice = &ray_slice_group->frame_slices[i * point_slices_per_frame];
                        struct v2 ray_end = ray_slice->center_px;
                        struct v2 ray_end_norm = ray_slice->center;

                        /* Apply to each point slice in point group */
                        for (u32 j = 0; j < point_slices_per_frame; ++j) {
                            struct sprite_sheet_slice *point_slice = &point_slice_group->frame_slices[(i * point_slices_per_frame) + j];
                            point_slice->dir_px = v2_sub(ray_end, point_slice->center_px);
                            point_slice->dir = v2_sub(ray_end_norm, point_slice->center);
                            point_slice->has_ray = 1;
                        }
                    }

                }
            }
        }

        scratch_end(scratch);
    }

    return sheet;
}
|
|
|
|
INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag)
|
|
{
|
|
__prof;
|
|
struct arena_temp scratch = scratch_begin_no_conflict();
|
|
struct cache_entry *e = ref.e;
|
|
|
|
atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_WORKING);
|
|
struct string path = tag.path;
|
|
|
|
logf_info("Loading sprite sheet [%F] \"%F\"", FMT_HEX(e->hash.v), FMT_STR(path));
|
|
b32 success = 0;
|
|
i64 start_ns = sys_time_ns();
|
|
|
|
ASSERT(e->kind == CACHE_ENTRY_KIND_SHEET);
|
|
|
|
/* TODO: Replace arena allocs w/ buddy allocator */
|
|
e->arena = arena_alloc(SHEET_ARENA_RESERVE);
|
|
{
|
|
/* Decode */
|
|
struct ase_decode_sheet_result decoded = ZI;
|
|
{
|
|
struct resource sheet_rs = resource_open(path);
|
|
if (resource_exists(&sheet_rs)) {
|
|
decoded = ase_decode_sheet(scratch.arena, resource_get_data(&sheet_rs));
|
|
} else {
|
|
logf_error("Sprite sheet for \"%F\" not found", FMT_STR(path));
|
|
}
|
|
resource_close(&sheet_rs);
|
|
}
|
|
|
|
if (decoded.success) {
|
|
struct resource sheet_rs = resource_open(path);
|
|
decoded = ase_decode_sheet(scratch.arena, resource_get_data(&sheet_rs));
|
|
resource_close(&sheet_rs);
|
|
|
|
/* Initialize */
|
|
e->sheet = arena_push_no_zero(e->arena, struct sprite_sheet);
|
|
*e->sheet = init_sheet_from_ase_result(e->arena, decoded);
|
|
e->sheet->loaded = 1;
|
|
e->sheet->valid = 1;
|
|
|
|
success = 1;
|
|
}
|
|
}
|
|
arena_set_readonly(e->arena);
|
|
e->memory_usage = e->arena->committed;
|
|
atomic64_fetch_add(&G.cache.memory_usage.v, e->memory_usage);
|
|
|
|
if (success) {
|
|
logf_success("Loaded sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).",
|
|
FMT_HEX(e->hash.v),
|
|
FMT_STR(path),
|
|
FMT_FLOAT(SECONDS_FROM_NS(sys_time_ns() - start_ns)),
|
|
FMT_UINT(e->memory_usage));
|
|
}
|
|
|
|
atomic32_fetch_set(&e->state, CACHE_ENTRY_STATE_LOADED);
|
|
|
|
#if RESOURCE_RELOADING
|
|
struct cache_bin *bin = &G.cache.bins[e->hash.v % CACHE_BINS_COUNT];
|
|
struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
|
|
{
|
|
for (struct cache_entry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin) {
|
|
if (old_entry != e && old_entry->hash.v == e->hash.v) {
|
|
atomic32_fetch_set(&old_entry->out_of_date, 1);
|
|
}
|
|
}
|
|
e->load_time_ns = sys_time_ns();
|
|
}
|
|
snc_unlock(&bin_lock);
|
|
#endif
|
|
|
|
scratch_end(scratch);
|
|
}
|
|
|
|
/* ========================== *
|
|
* Scope
|
|
* ========================== */
|
|
|
|
/* Atomically add `amount` to an entry's refcount while stamping the current
 * evictor cycle, via a CAS retry loop over the packed 64-bit
 * cache_refcount word. `amount` may be negative (release); the count must
 * never go below zero. */
INTERNAL void refcount_add(struct cache_entry *e, i32 amount)
{
    i32 evictor_cycle = atomic32_fetch(&G.evictor_cycle.v);
    struct atomic64 *refcount_atomic = &e->refcount_struct.v;
    u64 old_refcount_uncast = atomic64_fetch(refcount_atomic);
    for (;;) {
        /* Reinterpret the raw 64-bit word as the two-field refcount struct
         * (pointer pun; relies on STATIC_ASSERT(sizeof == 8) above) */
        struct cache_refcount new_refcount = *(struct cache_refcount *)&old_refcount_uncast;
        new_refcount.count += amount;
        new_refcount.last_ref_cycle = evictor_cycle;
        /* CAS: only install if nobody modified the word since we read it */
        u64 v = atomic64_fetch_test_set(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
        if (v == old_refcount_uncast) {
            ASSERT(new_refcount.count >= 0);
            break;
        }
        /* Lost the race: retry from the value the CAS observed */
        old_refcount_uncast = v;
    }
}
|
|
|
|
/* Ensure `scope` holds a reference to entry `e`, returning the scope-local
 * ref node. If the scope already references the entry the existing node is
 * returned; otherwise the entry's refcount is incremented and a node is
 * taken from the scope's pool. "Unsafe": the caller must guarantee the entry
 * cannot be evicted concurrently (either by holding the bin lock or an
 * existing reference) — see the two wrappers below. */
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_unsafe(struct sprite_scope *scope, struct cache_entry *e)
{
    u64 bin_index = e->hash.v % CACHE_BINS_COUNT;

    /* Walk the scope's bin list; `slot` ends pointing either at the matching
     * node's slot or at the null tail where a new node is appended */
    struct sprite_scope_cache_ref **slot = &scope->ref_node_bins[bin_index];
    while (*slot) {
        if ((*slot)->ref.e == e) {
            /* Found reference in scope */
            break;
        } else {
            slot = &(*slot)->next_in_bin;
        }
    }

    if (*slot == 0) {
        if (scope->num_references >= MAX_SCOPE_REFERENCES) {
            sys_panic(LIT("Max sprite scope references reached"));
        }

        /* Increment refcount */
        refcount_add(e, 1);

        /* Grab node from pool */
        struct sprite_scope_cache_ref *scope_ref = &scope->ref_node_pool[scope->num_references++];
        MEMZERO_STRUCT(scope_ref);
        scope_ref->ref.e = e;

        *slot = scope_ref;
    }

    return *slot;
}
|
|
|
|
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_entry(struct sprite_scope *scope, struct cache_entry *e, struct snc_lock *bin_lock)
|
|
{
|
|
/* Guaranteed safe if caller has lock on entry's bin, since entry may not have an existing reference and could otherwise be evicted while ensuring this reference */
|
|
snc_assert_locked_e_or_s(bin_lock, &G.cache.bins[e->hash.v % CACHE_BINS_COUNT].mutex);
|
|
return scope_ensure_ref_unsafe(scope, e);
|
|
}
|
|
|
|
/* Ensure a scope reference to the entry behind an existing ref. No bin lock
 * is needed: the caller's ref already keeps the entry from being evicted. */
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_ref(struct sprite_scope *scope, struct cache_ref ref)
{
    struct cache_entry *entry = ref.e;
    return scope_ensure_ref_unsafe(scope, entry);
}
|
|
|
|
/* Begin a sprite scope: entries referenced through it are pinned against
 * eviction until sprite_scope_end. Scope structs (plus their bin array and
 * ref-node pool) are recycled through a global free list; only the bins are
 * re-zeroed on reuse — pool nodes are initialized lazily as references are
 * taken (num_references starts at 0). */
struct sprite_scope *sprite_scope_begin(void)
{
    /* Alloc scope */
    struct sprite_scope *res = 0;
    struct sprite_scope_cache_ref **bins = 0;
    struct sprite_scope_cache_ref *pool = 0;
    {
        struct snc_lock lock = snc_lock_e(&G.scopes_mutex);
        {
            if (G.first_free_scope) {
                /* Reuse a released scope and its attached arrays */
                res = G.first_free_scope;
                G.first_free_scope = res->next_free;
                bins = res->ref_node_bins;
                pool = res->ref_node_pool;
            } else {
                res = arena_push_no_zero(G.scopes_arena, struct sprite_scope);
                bins = arena_push_array_no_zero(G.scopes_arena, struct sprite_scope_cache_ref *, CACHE_BINS_COUNT);
                pool = arena_push_array_no_zero(G.scopes_arena, struct sprite_scope_cache_ref, MAX_SCOPE_REFERENCES);
            }
        }
        snc_unlock(&lock);
    }
    MEMZERO_STRUCT(res);
    /* Only the bin heads need clearing; pool entries are zeroed on grab */
    MEMZERO(bins, sizeof(*bins) * CACHE_BINS_COUNT);
    res->ref_node_bins = bins;
    res->ref_node_pool = pool;
    return res;
}
|
|
|
|
void sprite_scope_end(struct sprite_scope *scope)
|
|
{
|
|
/* Dereference entries */
|
|
u64 num_references = scope->num_references;
|
|
for (u64 i = 0; i < num_references; ++i) {
|
|
struct sprite_scope_cache_ref *n = &scope->ref_node_pool[i];
|
|
refcount_add(n->ref.e, -1);
|
|
}
|
|
|
|
/* Release scope */
|
|
struct snc_lock lock = snc_lock_e(&G.scopes_mutex);
|
|
{
|
|
scope->next_free = G.first_free_scope;
|
|
G.first_free_scope = scope;
|
|
}
|
|
snc_unlock(&lock);
|
|
}
|
|
|
|
/* ========================== *
|
|
* Cache interface
|
|
* ========================== */
|
|
|
|
/* Look up a cache entry by hash within its bin and, if found, pin it into
 * `scope`, returning the scope ref (0 if absent). The caller must hold the
 * bin's mutex (exclusive or shared) via `bin_lock`. */
INTERNAL struct sprite_scope_cache_ref *cache_lookup(struct sprite_scope *scope, struct cache_entry_hash hash, struct snc_lock *bin_lock)
{
    struct sprite_scope_cache_ref *scope_ref = 0;

    struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT];
    snc_assert_locked_e_or_s(bin_lock, &bin->mutex); /* Lock required for iterating bin */

#if RESOURCE_RELOADING
    /* If resource reloading is enabled, then we want to find the
     * newest entry rather than the first one that exists since
     * there may be more than one matching entry in the cache */
    struct cache_entry *match = 0;
    enum cache_entry_state match_state = CACHE_ENTRY_STATE_NONE;
    for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) {
        if (entry->hash.v == hash.v) {
            enum cache_entry_state entry_state = atomic32_fetch(&entry->state);
            /* Prefer the most-advanced state; tie-break fully loaded entries
             * by the most recent load time */
            if (!match || entry_state > match_state || (entry_state == CACHE_ENTRY_STATE_LOADED && match_state == CACHE_ENTRY_STATE_LOADED && entry->load_time_ns > match->load_time_ns)) {
                match = entry;
                match_state = entry_state;
            }
        }
    }
    if (match) {
        scope_ref = scope_ensure_ref_from_entry(scope, match, bin_lock);
    }
#else
    /* Without reloading there is at most one entry per hash: first hit wins */
    for (struct cache_entry *entry = bin->first; entry; entry = entry->next_in_bin) {
        if (entry->hash.v == hash.v) {
            scope_ref = scope_ensure_ref_from_entry(scope, entry, bin_lock);
            break;
        }
    }
#endif

    return scope_ref;
}
|
|
|
|
/* Resolve (scope, tag, kind) to a pinned cache entry, creating one if needed.
 * Lookup order: (1) scope's own ref list (no locks), (2) cache bin under a
 * shared lock, (3) cache bin again under an exclusive lock — re-checked to
 * close the race between dropping the shared lock and taking the exclusive
 * one — then allocate/recycle a fresh entry in state NONE. `force_new` skips
 * all lookups and always allocates (used for reload). Always returns a valid
 * scope ref. */
INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind, b32 force_new)
{
    struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
    u64 bin_index = hash.v % CACHE_BINS_COUNT;
    struct sprite_scope_cache_ref *scope_ref = 0;

    /* Search for entry in scope */
    if (!force_new) {
        scope_ref = scope->ref_node_bins[bin_index];
        while (scope_ref) {
            if (scope_ref->ref.e->hash.v == hash.v) {
                break;
            }
            scope_ref = scope_ref->next_in_bin;
        }
    }

    /* If not in scope, search for entry in cache */
    if (!scope_ref) {
        struct cache_bin *bin = &G.cache.bins[bin_index];

        /* Search in cache (shared lock: read-mostly fast path) */
        if (!force_new) {
            struct snc_lock bin_lock = snc_lock_s(&bin->mutex);
            {
                scope_ref = cache_lookup(scope, hash, &bin_lock);
            }
            snc_unlock(&bin_lock);
        }

        /* If not in cache, allocate new entry */
        if (!scope_ref) {
            struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
            {
                /* Search cache one more time in case an entry was allocated between locks */
                if (!force_new) {
                    scope_ref = cache_lookup(scope, hash, &bin_lock);
                }

                if (!scope_ref) {
                    /* Cache entry still absent, allocate new entry
                     * (recycle from pool if possible) */
                    struct cache_entry *entry = 0;
                    {
                        struct snc_lock pool_lock = snc_lock_e(&G.cache.entry_pool_mutex);
                        if (G.cache.entry_pool_first_free) {
                            entry = G.cache.entry_pool_first_free;
                            G.cache.entry_pool_first_free = entry->next_free;
                        } else {
                            entry = arena_push_no_zero(G.cache.arena, struct cache_entry);
                        }
                        snc_unlock(&pool_lock);
                    }
                    MEMZERO_STRUCT(entry);

                    /* Init entry and add to bin (append at tail) */
                    {
                        if (bin->last) {
                            bin->last->next_in_bin = entry;
                            entry->prev_in_bin = bin->last;
                        } else {
                            bin->first = entry;
                        }
                        bin->last = entry;
                    }
                    /* NOTE(review): recomputes the hash already held in `hash` above —
                     * equivalent, just redundant */
                    entry->hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
                    entry->kind = kind;
                    /* Placeholders until a load job fills these in */
                    entry->texture = G.nil_texture;
                    entry->sheet = G.nil_sheet;

                    scope_ref = scope_ensure_ref_from_entry(scope, entry, &bin_lock);
                }
            }
            snc_unlock(&bin_lock);
        }
    }

    return scope_ref;
}
|
|
|
|
/* Common fetch path for textures and sheets.
 * Resolves (or creates) the cache entry for the tag, kicks off loading if the
 * entry is new, and returns the loaded data. When `await` is 0 the caller may
 * receive the kind's loading placeholder while a background job runs; when
 * `await` is 1 this function does not return until the entry is LOADED. */
INTERNAL void *data_from_tag_internal(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind, b32 await)
{
    /* TODO: Replace switch statements */
    void *res = 0;
    switch (kind) {
        case CACHE_ENTRY_KIND_TEXTURE: { res = G.loading_texture; } break;
        case CACHE_ENTRY_KIND_SHEET: { res = G.loading_sheet; } break;
        default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
    }

    struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind, 0);
    struct cache_ref ref = scope_ref->ref;

    enum cache_entry_state state = atomic32_fetch(&ref.e->state);
    if (state == CACHE_ENTRY_STATE_LOADED) {
        switch (kind) {
            case CACHE_ENTRY_KIND_TEXTURE: { res = ref.e->texture; } break;
            case CACHE_ENTRY_KIND_SHEET: { res = ref.e->sheet; } break;
            default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
        }
    } else if (state == CACHE_ENTRY_STATE_NONE) {
        /* If entry is new, load texture. The CAS ensures exactly one thread
         * transitions NONE -> QUEUED and becomes responsible for the load. */
        if (atomic32_fetch_test_set(&ref.e->state, CACHE_ENTRY_STATE_NONE, CACHE_ENTRY_STATE_QUEUED) == CACHE_ENTRY_STATE_NONE) {
            /* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
            if (await) {
                switch (kind) {
                    case CACHE_ENTRY_KIND_TEXTURE: {
                        cache_entry_load_texture(ref, tag);
                    } break;
                    case CACHE_ENTRY_KIND_SHEET: {
                        cache_entry_load_sheet(ref, tag);
                    } break;
                    default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
                }
            } else {
                /* Allocate cmd */
                push_load_job(ref, tag);
            }
        }
    }

    /* Spinlock until result is ready */
    if (await && state != CACHE_ENTRY_STATE_LOADED) {
        while (atomic32_fetch(&ref.e->state) != CACHE_ENTRY_STATE_LOADED) {
            ix_pause();
        }
        /* BUGFIX: re-read the result after the wait. Previously, a caller that
         * awaited a load performed by another thread (state was QUEUED/WORKING,
         * or the NONE->QUEUED CAS was lost) spun until LOADED but still
         * returned the loading placeholder because `res` was never updated.
         * This also covers the synchronous self-load path (harmless re-read). */
        switch (kind) {
            case CACHE_ENTRY_KIND_TEXTURE: { res = ref.e->texture; } break;
            case CACHE_ENTRY_KIND_SHEET: { res = ref.e->sheet; } break;
            default: { sys_panic(LIT("Unknown sprite cache entry kind")); } break;
        }
    }

    return res;
}
|
|
|
|
/* ========================== *
|
|
* Texture
|
|
* ========================== */
|
|
|
|
struct sprite_texture *sprite_texture_from_tag_await(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
return (struct sprite_texture *)data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_TEXTURE, 1);
|
|
}
|
|
|
|
struct sprite_texture *sprite_texture_from_tag_async(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
return (struct sprite_texture *)data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_TEXTURE, 0);
|
|
}
|
|
|
|
|
|
void sprite_texture_from_tag_prefetch(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_TEXTURE, 0);
|
|
}
|
|
|
|
/* ========================== *
|
|
* Sheet
|
|
* ========================== */
|
|
|
|
struct sprite_sheet *sprite_sheet_from_tag_await(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
return (struct sprite_sheet *)data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_SHEET, 1);
|
|
}
|
|
|
|
struct sprite_sheet *sprite_sheet_from_tag_async(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
return (struct sprite_sheet *)data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_SHEET, 0);
|
|
}
|
|
|
|
void sprite_sheet_from_tag_prefetch(struct sprite_scope *scope, struct sprite_tag tag)
|
|
{
|
|
data_from_tag_internal(scope, tag, CACHE_ENTRY_KIND_SHEET, 0);
|
|
}
|
|
|
|
|
|
/* Return frame `index` from the sheet. An out-of-range index yields a
 * default frame: index 0, 0.1s duration, clipped to the whole sheet. */
struct sprite_sheet_frame sprite_sheet_get_frame(struct sprite_sheet *sheet, u32 index)
{
    if (index >= sheet->frames_count) {
        struct sprite_sheet_frame fallback = ZI;
        fallback.index = 0;
        fallback.duration = 0.1;
        fallback.clip = CLIP_ALL;
        return fallback;
    }
    return sheet->frames[index];
}
|
|
|
|
struct sprite_sheet_span sprite_sheet_get_span(struct sprite_sheet *sheet, struct string name)
|
|
{
|
|
struct sprite_sheet_span res = ZI;
|
|
if (sheet->spans_count > 0) {
|
|
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
|
|
struct sprite_sheet_span *entry = (struct sprite_sheet_span *)dict_get(sheet->spans_dict, hash);
|
|
if (entry) {
|
|
res = *entry;
|
|
}
|
|
}
|
|
return res;
|
|
}
|
|
|
|
/* Look up the first slice of a named slice group for a given frame.
 * If the name is absent, falls back to the 'pivot' slice; if 'pivot'
 * itself is absent, synthesizes a slice centered on the frame. */
struct sprite_sheet_slice sprite_sheet_get_slice(struct sprite_sheet *sheet, struct string name, u32 frame_index)
{
    if (sheet->slice_groups_count > 0) {
        u64 key = hash_fnv64(HASH_FNV64_BASIS, name);
        struct sprite_sheet_slice_group *group = (struct sprite_sheet_slice_group *)dict_get(sheet->slice_groups_dict, key);
        if (group) {
            return group->frame_slices[frame_index * group->per_frame_count];
        }
    }

    /* Named slice not found: retry once with 'pivot' as the default. */
    if (!string_eq(name, LIT("pivot"))) {
        return sprite_sheet_get_slice(sheet, LIT("pivot"), frame_index);
    }

    /* 'pivot' slice does not exist either, return frame center. */
    struct sprite_sheet_slice res = ZI;
    res.center = V2(0, 0);
    res.center_px = v2_mul(sheet->frame_size, 0.5f);
    res.dir_px = V2(res.center_px.x, 0);
    res.dir = V2(0, -0.5);
    return res;
}
|
|
|
|
/* Return all slices of a named slice group for a given frame.
 * Yields a zeroed (empty) array when the group does not exist. */
struct sprite_sheet_slice_array sprite_sheet_get_slices(struct sprite_sheet *sheet, struct string name, u32 frame_index)
{
    struct sprite_sheet_slice_array res = ZI;
    if (sheet->slice_groups_count == 0) {
        return res;
    }
    u64 key = hash_fnv64(HASH_FNV64_BASIS, name);
    struct sprite_sheet_slice_group *group = (struct sprite_sheet_slice_group *)dict_get(sheet->slice_groups_dict, key);
    if (group) {
        res.count = group->per_frame_count;
        res.slices = &group->frame_slices[frame_index * group->per_frame_count];
    }
    return res;
}
|
|
|
|
/* ========================== *
|
|
* Load job
|
|
* ========================== */
|
|
|
|
INTERNAL SYS_JOB_DEF(sprite_load_job, job)
|
|
{
|
|
__prof;
|
|
struct load_cmd *cmd = job.sig;
|
|
struct cache_ref ref = cmd->ref;
|
|
|
|
switch (ref.e->kind) {
|
|
case CACHE_ENTRY_KIND_TEXTURE: {
|
|
cache_entry_load_texture(ref, cmd->tag);
|
|
} break;
|
|
case CACHE_ENTRY_KIND_SHEET: {
|
|
cache_entry_load_sheet(ref, cmd->tag);
|
|
} break;
|
|
default: { sys_panic(LIT("Unknown sprite cache node kind")); } break;
|
|
}
|
|
|
|
/* Free cmd */
|
|
struct snc_lock lock = snc_lock_e(&G.load_cmds_mutex);
|
|
{
|
|
sprite_scope_end(cmd->scope);
|
|
cmd->next_free = G.first_free_load_cmd;
|
|
G.first_free_load_cmd = cmd;
|
|
}
|
|
snc_unlock(&lock);
|
|
}
|
|
|
|
/* ========================== *
|
|
* Resource watch
|
|
* ========================== */
|
|
|
|
#if RESOURCE_RELOADING
|
|
|
|
INTERNAL void reload_if_exists(struct sprite_scope *scope, struct sprite_tag tag, enum cache_entry_kind kind)
|
|
{
|
|
struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
|
|
struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT];
|
|
struct sprite_scope_cache_ref *existing_ref = 0;
|
|
struct snc_lock bin_lock = snc_lock_s(&bin->mutex);
|
|
{
|
|
existing_ref = cache_lookup(scope, hash, &bin_lock);
|
|
}
|
|
snc_unlock(&bin_lock);
|
|
|
|
if (existing_ref) {
|
|
logf_info("Sprite resource file \"%F\" has changed for sprite [%F].", FMT_STR(tag.path), FMT_HEX(hash.v));
|
|
struct sprite_scope_cache_ref *scope_ref = cache_entry_from_tag(scope, tag, kind, 1);
|
|
push_load_job(scope_ref->ref, tag);
|
|
}
|
|
}
|
|
|
|
INTERNAL RESOURCE_WATCH_CALLBACK_FUNC_DEF(sprite_resource_watch_callback, name)
|
|
{
|
|
struct sprite_scope *scope = sprite_scope_begin();
|
|
|
|
struct sprite_tag tag = sprite_tag_from_path(name);
|
|
for (enum cache_entry_kind kind = 0; kind < NUM_CACHE_ENTRY_KINDS; ++kind) {
|
|
reload_if_exists(scope, tag, kind);
|
|
}
|
|
|
|
sprite_scope_end(scope);
|
|
}
|
|
|
|
#endif
|
|
|
|
/* ========================== *
|
|
* Evictor job
|
|
* ========================== */
|
|
|
|
/* Scratch-allocated record describing one eviction candidate found during
 * the evictor's scan pass. */
struct evict_node {
    i32 last_ref_cycle;              /* Snapshot of the entry's last-ref cycle at scan time; set to -1 to force eviction (out-of-date entry) */
    struct cache_entry *cache_entry; /* Candidate cache entry */
    struct cache_bin *cache_bin;     /* Bin containing the entry; must be exclusively locked before unlinking */

    struct evict_node *next_evicted; /* Links the subset of candidates that were actually evicted */
};
|
|
|
|
/* Comparator ordering eviction candidates by last_ref_cycle.
 * Returns (b > a) - (a > b): negative when a's cycle is larger.
 * NOTE(review): under qsort-style comparator semantics this sorts
 * larger (newer) cycles first and pushes the -1 out-of-date sentinel
 * to the end — verify against merge_sort's comparator convention;
 * LRU eviction would suggest oldest-first was intended. */
INTERNAL SORT_COMPARE_FUNC_DEF(evict_sort, arg_a, arg_b, udata)
{
    (UNUSED)udata;
    struct evict_node *a = arg_a;
    struct evict_node *b = arg_b;
    i32 a_cycle = a->last_ref_cycle;
    i32 b_cycle = b->last_ref_cycle;
    return (b_cycle > a_cycle) - (a_cycle > b_cycle);
}
|
|
|
|
/* NOTE:
|
|
* A cache node is safe from eviction as long as:
|
|
* - Its bin mutex is locked
|
|
* - Any references are held to the node (its refcount > 0)
|
|
*
|
|
* An attempt to evict a cache node will occur when:
|
|
* - Its refcount = 0 and
|
|
* - The cache is over its memory budget and the node's last reference is longer ago than the grace period
|
|
* - Resource reloading is enabled and the node is out of date due to a change to its original resource file
|
|
*/
|
|
/* Long-running evictor job. Each cycle:
 *   1. Scan all bins (shared lock per bin) collecting unreferenced entries
 *      that are either past the grace period (while over budget) or out of
 *      date, into a scratch-allocated array.
 *   2. Sort the candidates with evict_sort.
 *   3. Under an exclusive bin lock per candidate, re-check the refcount and
 *      unlink entries until the cache drops below the budget target
 *      (out-of-date entries are evicted unconditionally).
 *   4. Release evicted entries' GPU/arena memory and return them to the
 *      entry pool free list.
 * Then sleeps on a condition variable until the next cycle or shutdown. */
INTERNAL SYS_JOB_DEF(sprite_evictor_job, _)
{
    (UNUSED)_;
    b32 shutdown = 0;
    while (!shutdown) {
        {
            __profn("Sprite evictor cycle");
            struct arena_temp scratch = scratch_begin_no_conflict();
            u64 evict_array_count = 0;
            /* Dry push: records the array's base; actual nodes are pushed
             * contiguously during the scan below. */
            struct evict_node *evict_array = arena_push_dry(scratch.arena, struct evict_node);
            {
                i32 cur_cycle = atomic32_fetch(&G.evictor_cycle.v);

                /* Scan for evictable nodes. When resource reloading is
                 * compiled in, the scan always runs so out-of-date entries
                 * get picked up even while under budget. */
                b32 cache_over_budget_threshold = atomic64_fetch(&G.cache.memory_usage.v) > (i64)CACHE_MEMORY_BUDGET_THRESHOLD;
                if (cache_over_budget_threshold || RESOURCE_RELOADING) {
                    __profn("Evictor scan");
                    for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) {
                        struct cache_bin *bin = &G.cache.bins[i];
                        struct snc_lock bin_lock = snc_lock_s(&bin->mutex);
                        {
                            struct cache_entry *n = bin->first;
                            while (n) {
                                /* Refcount struct is read atomically as one 64-bit
                                 * value and reinterpreted (see STATIC_ASSERT on
                                 * struct cache_refcount size). */
                                u64 refcount_uncast = atomic64_fetch(&n->refcount_struct.v);
                                struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
                                if (refcount.count <= 0) {
                                    /* Add node to evict list */
#if RESOURCE_RELOADING
                                    b32 is_out_of_date = atomic32_fetch(&n->out_of_date);
#else
                                    b32 is_out_of_date = 0;
#endif
                                    b32 is_old = cache_over_budget_threshold && ((cur_cycle - refcount.last_ref_cycle) > EVICTOR_GRACE_PERIOD_CYCLES);
                                    if (is_old || is_out_of_date) {
                                        struct evict_node *en = arena_push(scratch.arena, struct evict_node);
                                        en->cache_entry = n;
                                        en->cache_bin = bin;
                                        en->last_ref_cycle = refcount.last_ref_cycle;
                                        if (is_out_of_date) {
                                            /* -1 marks "evict regardless of budget" */
                                            en->last_ref_cycle = -1;
                                        }
                                        ++evict_array_count;
                                    }
                                }

                                n = n->next_in_bin;
                            }
                        }
                        snc_unlock(&bin_lock);
                    }
                }

                /* Scratch arena should only contain evict array at this point */
                ASSERT(((arena_base(scratch.arena) + scratch.arena->pos) - (sizeof(*evict_array) * evict_array_count)) == (u8 *)evict_array);

                /* Sort evict nodes */
                {
                    __profn("Evictor sort");
                    merge_sort(evict_array, evict_array_count, sizeof(*evict_array), evict_sort, 0);
                }

                /* Remove evictable nodes from cache until under budget */
                struct evict_node *first_evicted = 0;
                {
                    __profn("Evictor cache removal");
                    b32 stop_evicting = 0;
                    for (u64 i = 0; i < evict_array_count && !stop_evicting; ++i) {
                        struct evict_node *en = &evict_array[i];
                        struct cache_bin *bin = en->cache_bin;
                        struct cache_entry *entry = en->cache_entry;
                        i32 last_ref_cycle = en->last_ref_cycle;
                        /* Re-read usage each iteration: earlier evictions in
                         * this loop reduce it toward the target. */
                        b32 cache_over_budget_target = atomic64_fetch(&G.cache.memory_usage.v) > (i64)CACHE_MEMORY_BUDGET_TARGET;
                        struct snc_lock bin_lock = snc_lock_e(&bin->mutex);
                        {
                            u64 refcount_uncast = atomic64_fetch(&entry->refcount_struct.v);
                            struct cache_refcount refcount = *(struct cache_refcount *)&refcount_uncast;
                            if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle)) {
                                /* Cache node has been referenced since scan, skip node. */
                            } else if (cache_over_budget_target || last_ref_cycle < 0) {
                                /* Remove from cache bin (doubly-linked list unlink) */
                                struct cache_entry *prev = entry->prev_in_bin;
                                struct cache_entry *next = entry->next_in_bin;
                                if (prev) {
                                    prev->next_in_bin = next;
                                } else {
                                    bin->first = next;
                                }
                                if (next) {
                                    next->prev_in_bin = prev;
                                } else {
                                    bin->last = prev;
                                }

                                atomic64_fetch_add(&G.cache.memory_usage.v, -((i64)entry->memory_usage));

                                /* Add to evicted list */
                                en->next_evicted = first_evicted;
                                first_evicted = en;
                            } else {
                                /* Cache is no longer over budget or force evicting, stop iteration */
                                stop_evicting = 1;
                            }
                        }
                        snc_unlock(&bin_lock);
                    }
                }

                if (first_evicted) {
                    /* Release evicted node memory. Safe without bin locks:
                     * evicted entries are no longer reachable from any bin. */
                    {
                        __profn("Evictor memory release");
                        for (struct evict_node *en = first_evicted; en; en = en->next_evicted) {
                            struct cache_entry *n = en->cache_entry;
                            if (n->kind == CACHE_ENTRY_KIND_TEXTURE && n->texture->valid) {
                                gp_resource_release(n->texture->gp_texture);
                            }
                            arena_release(n->arena);
                        }
                    }

                    /* Add evicted nodes to free list */
                    {
                        __profn("Evictor free list append");
                        struct snc_lock pool_lock = snc_lock_e(&G.cache.entry_pool_mutex);
                        for (struct evict_node *en = first_evicted; en; en = en->next_evicted) {
                            struct cache_entry *n = en->cache_entry;
                            n->next_free = G.cache.entry_pool_first_free;
                            G.cache.entry_pool_first_free = n;
                        }
                        snc_unlock(&pool_lock);
                    }
                }
            }
            atomic32_fetch_add(&G.evictor_cycle.v, 1);
            scratch_end(scratch);
        }

        /* Evictor sleep: timed CV wait doubles as the shutdown wakeup */
        {
            struct snc_lock lock = snc_lock_e(&G.evictor_scheduler_mutex);
            {
                if (!G.evictor_scheduler_shutdown) {
                    snc_cv_wait_time(&G.evictor_scheduler_shutdown_cv, &lock, EVICTOR_CYCLE_INTERVAL_NS);
                }
                shutdown = G.evictor_scheduler_shutdown;
            }
            snc_unlock(&lock);
        }
    }
}
|