1341 lines
46 KiB
C
1341 lines
46 KiB
C
/* Global shared state for the sprite cache module; zero-initialized. */
S_SharedState S_shared_state = ZI;
|
|
|
|
////////////////////////////////
|
|
//~ Startup
|
|
|
|
/* One-time sprite-system startup: builds the permanent placeholder
 * textures/sheets, the cache storage, the scope arena, and starts the
 * background evictor job. */
void S_Startup(void)
{
    __prof;
    S_SharedState *g = &S_shared_state;
    /* Permanent arena holds the nil/loading placeholder objects; it is made
     * read-only after this block so they can never be mutated. */
    g->perm_arena = AcquireArena(Mebi(1));
    {
        /* Init loading texture: zeroed placeholder handed out while a real
         * texture is still loading (note: `loaded` stays 0). */
        g->loading_texture = PushStruct(g->perm_arena, S_Texture);

        /* Init nil texture: fallback for missing textures; marked loaded so
         * callers never wait on it. */
        g->nil_texture = PushStruct(g->perm_arena, S_Texture);
        g->nil_texture->loaded = 1;
        {
            /* Fill the nil texture with a visible 64x64 purple/black
             * checkerboard so missing art is obvious on screen. */
            TempArena scratch = BeginScratchNoConflict();
            GPU_ResourceDesc desc = ZI;
            desc.kind = GPU_ResourceKind_Texture2D;
            desc.texture.format = GPU_Format_R8G8B8A8_Unorm;
            desc.texture.size = VEC3I32(64, 64, 1);
            g->nil_texture->gpu_resource = GPU_AcquireResource(desc);
            u32 *pixels = S_GeneratePurpleBlackImage(scratch.arena, desc.texture.size.x, desc.texture.size.y);
            /* 4 bytes per RGBA8 pixel. */
            GPU_PushString(0, g->nil_texture->gpu_resource, STRING(desc.texture.size.x * desc.texture.size.y * 4, (u8 *)pixels));
            EndScratch(scratch);
        }

        /* Init loading sheet: one-unit placeholder (`loaded` stays 0). */
        g->loading_sheet = PushStruct(g->perm_arena, S_Sheet);
        g->loading_sheet->image_size = VEC2(PIXELS_PER_UNIT, PIXELS_PER_UNIT);
        g->loading_sheet->frame_size = VEC2(PIXELS_PER_UNIT, PIXELS_PER_UNIT);

        /* Init nil sheet: fallback for missing sheets; marked loaded. */
        g->nil_sheet = PushStruct(g->perm_arena, S_Sheet);
        g->nil_sheet->image_size = VEC2(PIXELS_PER_UNIT, PIXELS_PER_UNIT);
        g->nil_sheet->frame_size = VEC2(PIXELS_PER_UNIT, PIXELS_PER_UNIT);
        g->nil_sheet->loaded = 1;
    }
    SetArenaReadonly(g->perm_arena);

    /* Cache storage: one big reserve arena plus the hash bins. */
    g->cache.arena = AcquireArena(Gibi(64));
    g->cache.bins = PushStructs(g->cache.arena, S_CacheEntryBin, S_CacheBinsCount);

    g->scopes_arena = AcquireArena(Gibi(64));

    /* Background evictor; S_Shutdown waits on shutdown_counter for it. */
    RunJob(1, S_EvictorJob, JobPool_Background, JobPriority_Low, &g->shutdown_counter, 0);

    OnExit(&S_Shutdown);
#if RESOURCE_RELOADING
    /* Hot-reload sprites when their resource files change on disk. */
    W_RegisterCallback(&S_WatchSpriteCallback);
#endif
}
|
|
|
|
////////////////////////////////
|
|
//~ Shutdown
|
|
|
|
/* Exit handler: asks the evictor job to stop and waits for it to finish. */
ExitFuncDef(S_Shutdown)
{
    __prof;
    S_SharedState *g = &S_shared_state;
    /* Signal evictor shutdown: the flag is set and the CV is signaled under
     * the scheduler mutex so the evictor cannot miss the wakeup. */
    {
        Lock lock = LockE(&g->evictor_scheduler_mutex);
        g->evictor_scheduler_shutdown = 1;
        SignalCv(&g->evictor_scheduler_shutdown_cv, I32Max);
        Unlock(&lock);
    }
    /* Yield for evictor shutdown: blocks until the evictor job signals the
     * counter it was given at startup. */
    YieldOnCounter(&g->shutdown_counter);
}
|
|
|
|
////////////////////////////////
|
|
//~ Nil purple-black image
|
|
|
|
/* Generate a width x height RGBA8 checkerboard of purple and black 4x4-pixel
 * cells into `arena`. Used as the visual "missing texture" fallback.
 * Returns a pointer to width*height u32 pixels (arena-owned). */
u32 *S_GeneratePurpleBlackImage(Arena *arena, u32 width, u32 height)
{
    u32 *pixels = PushStructsNoZero(arena, u32, width * height);

    /* A cell is purple when its x-cell and y-cell indices share parity,
     * black otherwise — equivalent to alternating blocks both ways. */
    u32 cell = 4;
    u32 purple = 0xFFDC00FF;
    u32 black = 0xFF000000;
    for (u32 y = 0; y < height; ++y)
    {
        for (u32 x = 0; x < width; ++x)
        {
            b32 even_cell = (((x / cell) + (y / cell)) % 2 == 0);
            pixels[x + width * y] = even_cell ? purple : black;
        }
    }

    return pixels;
}
|
|
|
|
////////////////////////////////
|
|
//~ Tag
|
|
|
|
/* Build a sprite tag from a resource path: the tag carries the path plus
 * its 64-bit FNV hash so later comparisons are O(1). */
S_Tag S_TagFromPath(String path)
{
    S_Tag tag = ZI;
    tag.path = path;
    tag.hash = HashFnv64(Fnv64Basis, path);
    return tag;
}
|
|
|
|
/* A tag with a zero hash is the nil tag. */
b32 S_IsTagNil(S_Tag tag)
{
    b32 is_nil = (tag.hash == 0);
    return is_nil;
}
|
|
|
|
/* Tags compare by hash only; the path strings are not examined. */
b32 S_EqTag(S_Tag t1, S_Tag t2)
{
    b32 equal = (t1.hash == t2.hash);
    return equal;
}
|
|
|
|
/* Derive the cache-entry hash from a tag hash plus the entry kind, so the
 * same path produces distinct cache entries per kind. */
S_Hash S_CacheEntryFromTagHash(u64 tag_hash, S_CacheEntryKind kind)
{
    S_Hash result = ZI;
    result.v = RandU64FromSeed(tag_hash + kind);
    return result;
}
|
|
|
|
|
|
////////////////////////////////
|
|
//~ Init sheet
|
|
|
|
/* Build a runtime S_Sheet from a decoded Aseprite sheet.
 * Copies frames, spans (named frame ranges), and slices into `arena`,
 * builds hash dictionaries for span/slice-group lookup, propagates slice
 * keyframes across frames, and resolves ".ray" slice groups into direction
 * vectors on their matching point groups. Everything returned is owned by
 * `arena`. */
S_Sheet S_SheetFromAseResult(Arena *arena, ASE_DecodedSheet ase)
{
    __prof;
    S_Sheet sheet = ZI;

    Assert(ase.num_frames >= 1);

    /* Slice coordinates below are normalized relative to the frame center. */
    Vec2 frame_size = ase.frame_size;
    Vec2 frame_center = MulVec2(ase.frame_size, 0.5f);

    /* Init frames */
    {
        __profn("Init frames");
        sheet.image_size = ase.image_size;
        sheet.frame_size = ase.frame_size;
        sheet.frames = PushStructs(arena, S_Frame, ase.num_frames);
        sheet.frames_count = ase.num_frames;
        for (ASE_Frame *ase_frame = ase.frame_head; ase_frame; ase_frame = ase_frame->next)
        {
            u32 index = ase_frame->index;

            /* Frame rect in the atlas, normalized to [0,1] UV space. */
            Vec2 clip_p1 = { (f32)ase_frame->x1 / (f32)ase.image_size.x, (f32)ase_frame->y1 / (f32)ase.image_size.y };
            Vec2 clip_p2 = { (f32)ase_frame->x2 / (f32)ase.image_size.x, (f32)ase_frame->y2 / (f32)ase.image_size.y };

            sheet.frames[index] = (S_Frame) {
                .index = index,
                .duration = ase_frame->duration,
                .clip = (ClipRect) { clip_p1, clip_p2 }
            };
        }
    }

    /* Init spans: copy each named span and index it by FNV hash of name. */
    sheet.spans_count = ase.num_spans;
    if (ase.num_spans > 0)
    {
        __profn("Init spans");
        sheet.spans = PushStructs(arena, S_Span, sheet.spans_count);
        sheet.spans_dict = InitDict(arena, (u64)(ase.num_spans * S_SheetSpanLookupTableBinRatio));
        u64 index = 0;
        for (ASE_Span *ase_span = ase.span_head; ase_span; ase_span = ase_span->next)
        {
            /* Copy the name into `arena` so the span outlives the decode. */
            String name = PushString(arena, ase_span->name);
            S_Span *span = &sheet.spans[index];
            span->name = name;
            span->start = ase_span->start;
            span->end = ase_span->end;
            u64 hash = HashFnv64(Fnv64Basis, name);
            SetDictValue(arena, sheet.spans_dict, hash, (u64)span);
            ++index;
        }
    }

    /* Init slices */
    if (ase.num_slice_keys > 0)
    {
        __profn("Init slices");
        /* Scratch holds the temporary grouping structures below. */
        TempArena scratch = BeginScratch(arena);

        /* One node per slice keyframe belonging to a group. */
        struct temp_ase_slice_key_node
        {
            ASE_SliceKey *key;
            struct temp_ase_slice_key_node *next;

            u32 index_in_frame;   /* which slot this key occupies within a frame */
            u32 earliest_frame;   /* earliest frame this key's slices start at */
        };

        /* One node per distinct slice name. */
        struct temp_slice_group_node
        {
            String name;
            u64 per_frame_count;  /* number of same-named slices per frame */
            struct temp_ase_slice_key_node *temp_ase_slice_key_head;
            struct temp_slice_group_node *next;

            S_SheetSliceGroup *final_slice_group;
        };

        /* Group slices by name and find out counts per frame */
        u64 num_temp_slice_group_nodes = 0;
        struct temp_slice_group_node *temp_slice_group_head = 0;
        {
            Dict *temp_slice_dict = InitDict(scratch.arena, (u64)(ase.num_slice_keys * 2));
            for (ASE_SliceKey *ase_slice_key = ase.slice_key_head; ase_slice_key; ase_slice_key = ase_slice_key->next)
            {
                String name = ase_slice_key->name;
                u64 hash = HashFnv64(Fnv64Basis, name);
                struct temp_slice_group_node *temp_slice_group_node = (struct temp_slice_group_node *)DictValueFromHash(temp_slice_dict, hash);
                if (!temp_slice_group_node)
                {
                    /* First time we see this name: create and link a group. */
                    temp_slice_group_node = PushStruct(scratch.arena, struct temp_slice_group_node);
                    temp_slice_group_node->name = name;
                    SetDictValue(scratch.arena, temp_slice_dict, hash, (u64)temp_slice_group_node);

                    ++num_temp_slice_group_nodes;
                    temp_slice_group_node->next = temp_slice_group_head;
                    temp_slice_group_head = temp_slice_group_node;
                }

                struct temp_ase_slice_key_node *node = PushStruct(scratch.arena, struct temp_ase_slice_key_node);
                node->key = ase_slice_key;
                node->next = temp_slice_group_node->temp_ase_slice_key_head;
                node->earliest_frame = U32Max; /* To be overwritten later after iterating */

                temp_slice_group_node->temp_ase_slice_key_head = node;

                ++temp_slice_group_node->per_frame_count;
            }
        }

        /* Acquire slice groups & fill originals in 2d array */
        sheet.slice_groups_count = num_temp_slice_group_nodes;
        sheet.slice_groups = PushStructs(arena, S_SheetSliceGroup, sheet.slice_groups_count);
        sheet.slice_groups_dict = InitDict(arena, (u64)(num_temp_slice_group_nodes * S_SliceLookupTableBinRatio));

        u64 index = 0;
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next)
        {
            S_SheetSliceGroup *slice_group = &sheet.slice_groups[index];
            slice_group->name = PushString(arena, temp_slice_group_node->name);
            slice_group->per_frame_count = temp_slice_group_node->per_frame_count;

            /* 2D array: [frame][slot] flattened as frame * per_frame_count + slot. */
            slice_group->frame_slices = PushStructs(arena, S_Slice, ase.num_frames * slice_group->per_frame_count);

            u64 index_in_frame = 0;
            for (struct temp_ase_slice_key_node *node = temp_slice_group_node->temp_ase_slice_key_head; node; node = node->next)
            {
                ASE_SliceKey *key = node->key;

                for (ASE_Slice *ase_slice = key->slice_head; ase_slice; ase_slice = ase_slice->next)
                {
                    u32 start = ase_slice->start;

                    S_Slice *slice = &slice_group->frame_slices[(start * slice_group->per_frame_count) + index_in_frame];
                    /* `original` marks authored keyframes; propagation below
                     * stops at originals and clears the flag on copies. */
                    slice->original = 1;

                    f32 x1_px = ase_slice->x1;
                    f32 y1_px = ase_slice->y1;
                    f32 x2_px = ase_slice->x2;
                    f32 y2_px = ase_slice->y2;
                    f32 width_px = x2_px - x1_px;
                    f32 height_px = y2_px - y1_px;

                    /* Normalized: frame-center-relative, divided by frame size. */
                    f32 x1 = (x1_px - frame_center.x) / frame_size.x;
                    f32 y1 = (y1_px - frame_center.y) / frame_size.y;
                    f32 x2 = (x2_px - frame_center.x) / frame_size.x;
                    f32 y2 = (y2_px - frame_center.y) / frame_size.y;
                    f32 width = x2 - x1;
                    f32 height = y2 - y1;

                    /* Rect */
                    Rect rect_px = RectFromScalar(x1_px, y1_px, width_px, height_px);
                    Rect rect = RectFromScalar(x1, y1, width, height);
                    /* Center */
                    Vec2 center_px = VEC2(x1_px + (width_px * 0.5f), y1_px + (height_px * 0.5f));
                    Vec2 center = VEC2(x1 + (width * 0.5f), y1 + (height * 0.5f));
                    /* Dir: placeholder; overwritten below if a ".ray" group matches. */
                    Vec2 dir_px = VEC2(center_px.x, -1);
                    Vec2 dir = VEC2(0, -1);

                    slice->rect_px = rect_px;
                    slice->center_px = center_px;
                    slice->dir_px = dir_px;

                    slice->rect = rect;
                    slice->center = center;
                    slice->dir = dir;

                    node->index_in_frame = index_in_frame;
                    if (start < node->earliest_frame)
                    {
                        node->earliest_frame = start;
                    }
                }

                ++index_in_frame;
            }

            temp_slice_group_node->final_slice_group = slice_group;
            u64 hash = HashFnv64(Fnv64Basis, slice_group->name);
            SetDictValue(arena, sheet.slice_groups_dict, hash, (u64)slice_group);
            ++index;
        }

        /* Propagate original slices into next frames (and first slices into previous frames) */
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next)
        {
            S_SheetSliceGroup *slice_group = temp_slice_group_node->final_slice_group;

            for (struct temp_ase_slice_key_node *node = temp_slice_group_node->temp_ase_slice_key_head; node; node = node->next)
            {
                ASE_SliceKey *key = node->key;
                u32 index_in_frame = node->index_in_frame;
                for (ASE_Slice *ase_slice = key->slice_head; ase_slice; ase_slice = ase_slice->next)
                {
                    u32 start = ase_slice->start;

                    S_Slice *slice = &slice_group->frame_slices[(start * slice_group->per_frame_count) + index_in_frame];

                    /* Propagate earliest slice to all previous frames */
                    if (start == node->earliest_frame && start > 0)
                    {
                        for (u32 i = start; i-- > 0;)
                        {
                            S_Slice *target = &slice_group->frame_slices[(i * slice_group->per_frame_count) + index_in_frame];
                            *target = *slice;
                            target->original = 0;
                        }
                    }

                    /* Propagate slice to forward frames until original is found */
                    for (u32 i = start + 1; i < ase.num_frames; ++i)
                    {
                        S_Slice *target = &slice_group->frame_slices[(i * slice_group->per_frame_count) + index_in_frame];
                        if (target->original)
                        {
                            break;
                        }
                        else
                        {
                            *target = *slice;
                            target->original = 0;
                        }
                    }
                }
            }
        }

        /* Calculate direction vectors: a group named "X.ray" supplies an
         * aim point for the group named "X"; each point slice's dir becomes
         * (ray center - point center) and has_ray is set. */
        for (struct temp_slice_group_node *temp_slice_group_node = temp_slice_group_head; temp_slice_group_node; temp_slice_group_node = temp_slice_group_node->next)
        {
            String ray_suffix = Lit(".ray");

            S_SheetSliceGroup *ray_slice_group = temp_slice_group_node->final_slice_group;
            String ray_slice_name = ray_slice_group->name;
            if (StringEndsWith(ray_slice_name, ray_suffix))
            {
                /* Strip ".ray" to find the matching point group. */
                String point_slice_name = ray_slice_name;
                point_slice_name.len -= ray_suffix.len;
                u64 hash = HashFnv64(Fnv64Basis, point_slice_name);
                S_SheetSliceGroup *point_slice_group = (S_SheetSliceGroup *)DictValueFromHash(sheet.slice_groups_dict, hash);
                if (point_slice_group)
                {
                    u32 point_slices_per_frame = point_slice_group->per_frame_count;

                    for (u32 i = 0; i < ase.num_frames; ++i)
                    {
                        /* Use ray slice in ray group.
                         * NOTE(review): this indexes the ray group's array with the
                         * POINT group's per-frame count — correct only when both
                         * groups have the same per_frame_count (or the ray group
                         * has one slice per frame); confirm that invariant holds. */
                        S_Slice *ray_slice = &ray_slice_group->frame_slices[i * point_slices_per_frame];
                        Vec2 ray_end = ray_slice->center_px;
                        Vec2 ray_end_norm = ray_slice->center;

                        /* Apply to each point slice in point group */
                        for (u32 j = 0; j < point_slices_per_frame; ++j)
                        {
                            S_Slice *point_slice = &point_slice_group->frame_slices[(i * point_slices_per_frame) + j];
                            point_slice->dir_px = SubVec2(ray_end, point_slice->center_px);
                            point_slice->dir = SubVec2(ray_end_norm, point_slice->center);
                            point_slice->has_ray = 1;
                        }
                    }

                }
            }
        }

        EndScratch(scratch);
    }

    return sheet;
}
|
|
|
|
////////////////////////////////
|
|
//~ Load job
|
|
|
|
/* Background job body: loads the cache entry described by the job signature
 * via the kind-appropriate loader, then ends the scope that S_PushLoadJob
 * opened to keep the entry referenced while the job was queued. */
JobDef(S_LoadSpriteJob, sig, UNUSED id)
{
    __prof;
    S_CacheEntryRef entry_ref = sig->ref;
    S_Tag entry_tag = sig->tag;
    S_Scope *job_scope = sig->scope;

    S_CacheEntryKind kind = entry_ref.e->kind;
    if (kind == S_CacheEntryKind_Texture)
    {
        S_LoadCacheEntryTexture(entry_ref, entry_tag);
    }
    else if (kind == S_CacheEntryKind_Sheet)
    {
        S_LoadCacheEntrySheet(entry_ref, entry_tag);
    }
    else
    {
        Panic(Lit("Unknown sprite cache node kind"));
    }

    S_EndScope(job_scope);
}
|
|
|
|
/* Queue a background job that loads the cache entry behind `ref`.
 * Opens a scope (closed by the job itself) so the entry stays referenced
 * until the job finishes, and copies the tag's path into the job's arena
 * so it outlives the caller. */
void S_PushLoadJob(S_CacheEntryRef ref, S_Tag tag)
{
    S_LoadSpriteJob_Desc *desc = PushJobDesc(S_LoadSpriteJob, .pool = JobPool_Background, .priority = JobPriority_Inherit);
    desc->sig->scope = S_BeginScope(); /* Scope ended by job */
    desc->sig->ref = S_EnsureRefFromRef(desc->sig->scope, ref)->ref;
    /* BUG FIX: copy the path into the job's arena BEFORE storing the tag in
     * the job signature. Previously the copy was made into the local `tag`
     * AFTER `desc->sig->tag = tag;`, so the queued job kept a pointer to the
     * caller's (possibly transient) path string while the arena copy was
     * discarded. */
    tag.path = PushString(desc->arena, tag.path);
    desc->sig->tag = tag;
    RunJobEx((GenericJobDesc *)desc);
}
|
|
|
|
////////////////////////////////
|
|
//~ Cache load operations
|
|
|
|
/* Load and decode an .ase file into cache entry `ref.e` as a GPU texture.
 * Transitions the entry state Working -> Loaded (even on decode failure,
 * leaving e->texture unset in that case), accounts memory usage, and —
 * with resource reloading enabled — marks older same-hash entries out of
 * date so lookups prefer this one. */
void S_LoadCacheEntryTexture(S_CacheEntryRef ref, S_Tag tag)
{
    __prof;
    S_SharedState *g = &S_shared_state;
    TempArena scratch = BeginScratchNoConflict();
    S_CacheEntry *e = ref.e;

    Atomic32FetchSet(&e->state, S_CacheEntryState_Working);
    String path = tag.path;

    P_LogInfoF("Loading sprite texture [%F] \"%F\"", FmtHex(e->hash.v), FmtString(path));
    b32 success = 0;
    i64 start_ns = TimeNs();

    Assert(StringEndsWith(path, Lit(".ase")));
    Assert(e->kind == S_CacheEntryKind_Texture);

    /* TODO: Replace arena allocs w/ buddy allocator */
    /* TODO: Arena probably overkill. Just using it to store texture struct. */
    e->arena = AcquireArena(S_TextureArenaReserve);
    u64 memory_size = 0;
    {
        /* Decode */
        ASE_DecodedImage decoded = ZI;
        {
            RES_Resource texture_rs = RES_OpenResource(path);
            if (RES_ResourceExists(&texture_rs))
            {
                /* Decode into scratch; pixels are uploaded to the GPU below
                 * and not retained CPU-side. */
                decoded = ASE_DecodeImage(scratch.arena, RES_GetResourceData(&texture_rs));
            }
            else
            {
                P_LogErrorF("Sprite texture for \"%F\" not found", FmtString(path));
            }
            RES_CloseResource(&texture_rs);
        }

        if (decoded.success)
        {
            /* Initialize */
            GPU_ResourceDesc gpu_desc = ZI;
            gpu_desc.kind = GPU_ResourceKind_Texture2D;
            gpu_desc.texture.size = VEC3I32(decoded.width, decoded.height, 1);
            gpu_desc.texture.format = GPU_Format_R8G8B8A8_Unorm_Srgb;
            e->texture = PushStruct(e->arena, S_Texture);
            e->texture->width = decoded.width;
            e->texture->height = decoded.height;
            e->texture->valid = 1;
            e->texture->loaded = 1;
            e->texture->gpu_resource = GPU_AcquireResource(gpu_desc);
            {
                /* 4 bytes per RGBA8 pixel. */
                u64 size = decoded.width * decoded.height * 4;
                GPU_PushString(0, e->texture->gpu_resource, STRING(size, (u8 *)decoded.pixels));
            }
            /* TODO: Query gpu for more accurate texture size in VRAM */
            memory_size += (decoded.width * decoded.height) * sizeof(*decoded.pixels);
            success = 1;
        }
    }
    SetArenaReadonly(e->arena);
    /* Memory usage = CPU arena + estimated VRAM size of the pixels. */
    e->memory_usage = e->arena->committed + memory_size;
    Atomic64FetchAdd(&g->cache.memory_usage.v, e->memory_usage);

    if (success)
    {
        P_LogSuccessF("Loaded sprite texture [%F] \"%F\" in %F seconds (cache size: %F bytes).",
                      FmtHex(e->hash.v),
                      FmtString(path),
                      FmtFloat(SecondsFromNs(TimeNs() - start_ns)),
                      FmtUint(e->memory_usage));
    }

    Atomic32FetchSet(&e->state, S_CacheEntryState_Loaded);

#if RESOURCE_RELOADING
    /* Under the bin lock, flag every older entry with the same hash as out
     * of date and stamp this entry's load time so newest-wins lookup works. */
    S_CacheEntryBin *bin = &g->cache.bins[e->hash.v % S_CacheBinsCount];
    Lock bin_lock = LockE(&bin->mutex);
    {
        for (S_CacheEntry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin)
        {
            if (old_entry != e && old_entry->hash.v == e->hash.v)
            {
                Atomic32FetchSet(&old_entry->out_of_date, 1);
            }
        }
        e->load_time_ns = TimeNs();
    }
    Unlock(&bin_lock);
#endif

    EndScratch(scratch);
}
|
|
|
|
/* Load and decode an .ase sprite sheet into cache entry `ref.e`.
 * Mirrors S_LoadCacheEntryTexture: transitions the entry Working -> Loaded,
 * builds the runtime sheet into the entry's arena, accounts memory usage,
 * and — with resource reloading enabled — marks older same-hash entries
 * out of date. */
void S_LoadCacheEntrySheet(S_CacheEntryRef ref, S_Tag tag)
{
    __prof;
    S_SharedState *g = &S_shared_state;
    TempArena scratch = BeginScratchNoConflict();
    S_CacheEntry *e = ref.e;

    Atomic32FetchSet(&e->state, S_CacheEntryState_Working);
    String path = tag.path;

    P_LogInfoF("Loading sprite sheet [%F] \"%F\"", FmtHex(e->hash.v), FmtString(path));
    b32 success = 0;
    i64 start_ns = TimeNs();

    Assert(e->kind == S_CacheEntryKind_Sheet);

    /* TODO: Replace arena allocs w/ buddy allocator */
    e->arena = AcquireArena(S_SheetArenaReserve);
    {
        /* Decode */
        ASE_DecodedSheet decoded = ZI;
        {
            RES_Resource sheet_rs = RES_OpenResource(path);
            if (RES_ResourceExists(&sheet_rs))
            {
                decoded = ASE_DecodeSheet(scratch.arena, RES_GetResourceData(&sheet_rs));
            }
            else
            {
                P_LogErrorF("Sprite sheet for \"%F\" not found", FmtString(path));
            }
            RES_CloseResource(&sheet_rs);
        }

        if (decoded.success)
        {
            /* FIX: removed a redundant second RES_OpenResource +
             * ASE_DecodeSheet here that re-decoded the same resource and
             * overwrote `decoded` with an identical result, doubling decode
             * time and scratch usage. The first decode is used directly. */

            /* Initialize */
            e->sheet = PushStructNoZero(e->arena, S_Sheet);
            *e->sheet = S_SheetFromAseResult(e->arena, decoded);
            e->sheet->loaded = 1;
            e->sheet->valid = 1;

            success = 1;
        }
    }
    SetArenaReadonly(e->arena);
    e->memory_usage = e->arena->committed;
    Atomic64FetchAdd(&g->cache.memory_usage.v, e->memory_usage);

    if (success)
    {
        P_LogSuccessF("Loaded sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).",
                      FmtHex(e->hash.v),
                      FmtString(path),
                      FmtFloat(SecondsFromNs(TimeNs() - start_ns)),
                      FmtUint(e->memory_usage));
    }

    Atomic32FetchSet(&e->state, S_CacheEntryState_Loaded);

#if RESOURCE_RELOADING
    /* Under the bin lock, flag every older entry with the same hash as out
     * of date and stamp this entry's load time so newest-wins lookup works. */
    S_CacheEntryBin *bin = &g->cache.bins[e->hash.v % S_CacheBinsCount];
    Lock bin_lock = LockE(&bin->mutex);
    {
        for (S_CacheEntry *old_entry = bin->first; old_entry; old_entry = old_entry->next_in_bin)
        {
            if (old_entry != e && old_entry->hash.v == e->hash.v)
            {
                Atomic32FetchSet(&old_entry->out_of_date, 1);
            }
        }
        e->load_time_ns = TimeNs();
    }
    Unlock(&bin_lock);
#endif

    EndScratch(scratch);
}
|
|
|
|
////////////////////////////////
|
|
//~ Ref
|
|
|
|
/* Atomically adjust the entry's reference count by `amount` (may be
 * negative) and stamp the current evictor cycle as the last-ref cycle.
 * Both fields live in one 64-bit word (S_Refcount) so they update in a
 * single compare-and-swap; the loop retries on contention. */
void S_AddRef(S_CacheEntry *e, i32 amount)
{
    S_SharedState *g = &S_shared_state;
    i32 evictor_cycle = Atomic32Fetch(&g->evictor_cycle.v);
    Atomic64 *refcount_atomic = &e->refcount_struct.v;
    u64 old_refcount_uncast = Atomic64Fetch(refcount_atomic);
    for (;;)
    {
        /* Reinterpret the raw 64-bit word as the refcount struct.
         * NOTE(review): this pointer-punning assumes sizeof(S_Refcount) == 8
         * and compatible layout — confirm against the struct definition. */
        S_Refcount new_refcount = *(S_Refcount *)&old_refcount_uncast;
        new_refcount.count += amount;
        new_refcount.last_ref_cycle = evictor_cycle;
        /* CAS: succeeds only if no other thread changed the word since we
         * read it; on failure retry with the freshly observed value. */
        u64 v = Atomic64FetchTestSet(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
        if (v == old_refcount_uncast)
        {
            /* Refcounts must never go negative (over-release bug). */
            Assert(new_refcount.count >= 0);
            break;
        }
        old_refcount_uncast = v;
    }
}
|
|
|
|
/* Return the scope-local reference node for `e`, creating one (and bumping
 * the entry's refcount) if the scope does not yet hold one.
 * "Unsafely": the caller must guarantee `e` cannot be evicted while this
 * runs — either by holding its bin lock or by already holding a reference. */
S_ScopeCacheEntryRef *S_EnsureRefUnsafely(S_Scope *scope, S_CacheEntry *e)
{
    u64 bin_index = e->hash.v % S_CacheBinsCount;

    /* Walk the scope's bin list; `slot` ends pointing either at the link of
     * the matching node or at the terminating null link (insertion point). */
    S_ScopeCacheEntryRef **slot = &scope->ref_node_bins[bin_index];
    for (; *slot != 0 && (*slot)->ref.e != e; slot = &(*slot)->next_in_bin)
    {
        /* Keep scanning until a match or the end of the chain. */
    }

    if (*slot == 0)
    {
        /* No existing reference in this scope: create one. */
        if (scope->num_references >= S_MaxScopeReferences)
        {
            Panic(Lit("Max sprite scope references reached"));
        }

        /* Increment refcount */
        S_AddRef(e, 1);

        /* Grab node from pool */
        S_ScopeCacheEntryRef *node = &scope->ref_node_pool[scope->num_references++];
        ZeroStruct(node);
        node->ref.e = e;

        *slot = node;
    }

    return *slot;
}
|
|
|
|
/* Ensure `scope` holds a reference to `e`; caller must hold the entry's
 * bin lock (asserted below). */
S_ScopeCacheEntryRef *S_EnsureRefFromEntryLocked(S_Scope *scope, S_CacheEntry *e, Lock *bin_lock)
{
    S_SharedState *g = &S_shared_state;
    LAX g; /* NOTE(review): presumably suppresses unused-variable warnings when the assert compiles out — confirm macro */
    /* Guaranteed safe if caller has lock on entry's bin, since entry may not have an existing reference and could otherwise be evicted while ensuring this reference */
    AssertLockedES(bin_lock, &g->cache.bins[e->hash.v % S_CacheBinsCount].mutex);
    return S_EnsureRefUnsafely(scope, e);
}
|
|
|
|
/* Ensure `scope` holds a reference to the entry behind an existing ref. */
S_ScopeCacheEntryRef *S_EnsureRefFromRef(S_Scope *scope, S_CacheEntryRef ref)
{
    /* Safe since caller has ref (entry cannot be evicted while referenced) */
    return S_EnsureRefUnsafely(scope, ref.e);
}
|
|
|
|
////////////////////////////////
|
|
//~ Scope
|
|
|
|
/* Acquire a sprite scope: reuse one from the shared free list when possible,
 * otherwise allocate scope + ref bins + ref pool from the scopes arena.
 * The returned scope starts empty (no references held). */
S_Scope *S_BeginScope(void)
{
    S_SharedState *g = &S_shared_state;
    /* Acquire scope */
    S_Scope *result = 0;
    S_ScopeCacheEntryRef **bins = 0;
    S_ScopeCacheEntryRef *pool = 0;
    {
        Lock lock = LockE(&g->scopes_mutex);
        {
            if (g->first_free_scope)
            {
                /* Reuse: stash the previously-allocated bins/pool pointers so
                 * they survive the ZeroStruct of the scope below. */
                result = g->first_free_scope;
                g->first_free_scope = result->next_free;
                bins = result->ref_node_bins;
                pool = result->ref_node_pool;
            }
            else
            {
                /* First use: allocate fresh storage (NoZero; zeroed below /
                 * pool entries are zeroed on first use). */
                result = PushStructNoZero(g->scopes_arena, S_Scope);
                bins = PushStructsNoZero(g->scopes_arena, S_ScopeCacheEntryRef *, S_CacheBinsCount);
                pool = PushStructsNoZero(g->scopes_arena, S_ScopeCacheEntryRef, S_MaxScopeReferences);
            }
        }
        Unlock(&lock);
    }
    /* Reset the scope and its bins, then reattach the storage pointers. */
    ZeroStruct(result);
    ZeroBytes(bins, sizeof(*bins) * S_CacheBinsCount);
    result->ref_node_bins = bins;
    result->ref_node_pool = pool;
    return result;
}
|
|
|
|
/* End a sprite scope: drop every cache-entry reference it holds, then
 * return the scope to the shared free list for reuse. */
void S_EndScope(S_Scope *scope)
{
    S_SharedState *g = &S_shared_state;

    /* Dereference every entry this scope pinned. */
    for (u64 ref_index = 0; ref_index < scope->num_references; ++ref_index)
    {
        S_ScopeCacheEntryRef *node = &scope->ref_node_pool[ref_index];
        S_AddRef(node->ref.e, -1);
    }

    /* Push the scope onto the shared free list. */
    Lock lock = LockE(&g->scopes_mutex);
    {
        scope->next_free = g->first_free_scope;
        g->first_free_scope = scope;
    }
    Unlock(&lock);
}
|
|
|
|
////////////////////////////////
|
|
//~ Cache lookup
|
|
|
|
//- Locked lookup
|
|
|
|
/* Look up a cache entry by hash while the caller holds the bin lock.
 * Returns a scope reference to the entry (creating one in `scope`), or 0
 * if no entry with that hash exists. */
S_ScopeCacheEntryRef *S_EntryFromHashLocked(S_Scope *scope, S_Hash hash, Lock *bin_lock)
{
    S_SharedState *g = &S_shared_state;
    S_ScopeCacheEntryRef *scope_ref = 0;

    S_CacheEntryBin *bin = &g->cache.bins[hash.v % S_CacheBinsCount];
    AssertLockedES(bin_lock, &bin->mutex); /* Lock required for iterating bin */

#if RESOURCE_RELOADING
    /* If resource reloading is enabled, then we want to find the
     * newest entry rather than the first one that exists since
     * there may be more than one matching entry in the cache */
    S_CacheEntry *match = 0;
    S_CacheEntryState match_state = S_CacheEntryState_None;
    for (S_CacheEntry *entry = bin->first; entry; entry = entry->next_in_bin)
    {
        if (entry->hash.v == hash.v)
        {
            S_CacheEntryState entry_state = Atomic32Fetch(&entry->state);
            /* Prefer: any match over none, a more-advanced load state, and
             * among fully loaded entries the most recently loaded one. */
            if (!match || entry_state > match_state || (entry_state == S_CacheEntryState_Loaded && match_state == S_CacheEntryState_Loaded && entry->load_time_ns > match->load_time_ns))
            {
                match = entry;
                match_state = entry_state;
            }
        }
    }
    if (match)
    {
        scope_ref = S_EnsureRefFromEntryLocked(scope, match, bin_lock);
    }
#else
    /* Without reloading, hashes are unique in the bin: first match wins. */
    for (S_CacheEntry *entry = bin->first; entry; entry = entry->next_in_bin)
    {
        if (entry->hash.v == hash.v)
        {
            scope_ref = S_EnsureRefFromEntryLocked(scope, entry, bin_lock);
            break;
        }
    }
#endif

    return scope_ref;
}
|
|
|
|
//- Unlocked lookup
|
|
|
|
/* Resolve a tag+kind to a referenced cache entry, allocating a fresh
 * (unloaded, state None) entry when none exists. Lookup order: the scope's
 * own references, then the shared cache under a shared lock, then — after
 * upgrading to an exclusive lock — a re-check and finally allocation.
 * `force_new` skips all lookups and always allocates (used for reloads). */
S_ScopeCacheEntryRef *S_EntryFromTag(S_Scope *scope, S_Tag tag, S_CacheEntryKind kind, b32 force_new)
{
    S_SharedState *g = &S_shared_state;
    S_Hash hash = S_CacheEntryFromTagHash(tag.hash, kind);
    u64 bin_index = hash.v % S_CacheBinsCount;
    S_ScopeCacheEntryRef *scope_ref = 0;

    /* Search for entry in scope (lock-free: the scope is thread-local). */
    if (!force_new)
    {
        scope_ref = scope->ref_node_bins[bin_index];
        while (scope_ref)
        {
            if (scope_ref->ref.e->hash.v == hash.v)
            {
                break;
            }
            scope_ref = scope_ref->next_in_bin;
        }
    }

    /* If not in scope, search for entry in cache */
    if (!scope_ref)
    {
        S_CacheEntryBin *bin = &g->cache.bins[bin_index];

        /* Search in cache under a shared (read) lock first. */
        if (!force_new)
        {
            Lock bin_lock = LockS(&bin->mutex);
            {
                scope_ref = S_EntryFromHashLocked(scope, hash, &bin_lock);
            }
            Unlock(&bin_lock);
        }

        /* If not in cache, allocate new entry under an exclusive lock. */
        if (!scope_ref)
        {
            Lock bin_lock = LockE(&bin->mutex);
            {
                /* Search cache one more time in case an entry was allocated between locks */
                if (!force_new)
                {
                    scope_ref = S_EntryFromHashLocked(scope, hash, &bin_lock);
                }

                if (!scope_ref)
                {
                    /* Cache entry still absent, allocate new entry:
                     * reuse a pooled entry if available, else push a new one. */
                    S_CacheEntry *entry = 0;
                    {
                        Lock pool_lock = LockE(&g->cache.entry_pool_mutex);
                        if (g->cache.entry_pool_first_free)
                        {
                            entry = g->cache.entry_pool_first_free;
                            g->cache.entry_pool_first_free = entry->next_free;
                        }
                        else
                        {
                            entry = PushStructNoZero(g->cache.arena, S_CacheEntry);
                        }
                        Unlock(&pool_lock);
                    }
                    ZeroStruct(entry);

                    /* Init entry and add to bin (append at tail). */
                    {
                        if (bin->last)
                        {
                            bin->last->next_in_bin = entry;
                            entry->prev_in_bin = bin->last;
                        }
                        else
                        {
                            bin->first = entry;
                        }
                        bin->last = entry;
                    }
                    entry->hash = S_CacheEntryFromTagHash(tag.hash, kind);
                    entry->kind = kind;
                    /* Point at the nil placeholders until a load completes. */
                    entry->texture = g->nil_texture;
                    entry->sheet = g->nil_sheet;

                    scope_ref = S_EnsureRefFromEntryLocked(scope, entry, &bin_lock);
                }
            }
            Unlock(&bin_lock);
        }
    }

    return scope_ref;
}
|
|
|
|
//- Data from tag
|
|
|
|
/* Fetch the loaded data (S_Texture* or S_Sheet*, as void*) for tag+kind.
 * Returns the real data when the entry is Loaded; otherwise returns the
 * kind's "loading" placeholder and — if the entry is brand new — either
 * loads synchronously (`await`) or queues a background load job.
 * With `await`, spins until the entry reaches the Loaded state. */
void *S_DataFromTag(S_Scope *scope, S_Tag tag, S_CacheEntryKind kind, b32 await)
{
    S_SharedState *g = &S_shared_state;
    /* TODO: Replace switch statements */
    /* Default result: the loading placeholder for this kind. */
    void *result = 0;
    switch (kind)
    {
        case S_CacheEntryKind_Texture: { result = g->loading_texture; } break;
        case S_CacheEntryKind_Sheet: { result = g->loading_sheet; } break;
        default: { Panic(Lit("Unknown sprite cache entry kind")); } break;
    }

    /* Find-or-create the entry; the scope keeps it referenced. */
    S_ScopeCacheEntryRef *scope_ref = S_EntryFromTag(scope, tag, kind, 0);
    S_CacheEntryRef ref = scope_ref->ref;

    S_CacheEntryState state = Atomic32Fetch(&ref.e->state);
    if (state == S_CacheEntryState_Loaded)
    {
        switch (kind)
        {
            case S_CacheEntryKind_Texture: { result = ref.e->texture; } break;
            case S_CacheEntryKind_Sheet: { result = ref.e->sheet; } break;
            default: { Panic(Lit("Unknown sprite cache entry kind")); } break;
        }
    }
    else if (state == S_CacheEntryState_None)
    {
        /* If entry is new, load texture.
         * The CAS None -> Queued ensures exactly one caller starts the load. */
        if (Atomic32FetchTestSet(&ref.e->state, S_CacheEntryState_None, S_CacheEntryState_Queued) == S_CacheEntryState_None)
        {
            /* If caller is awaiting result then just load now on the calling thread. Otherwise spawn a work task. */
            if (await)
            {
                switch (kind)
                {
                    case S_CacheEntryKind_Texture:
                    {
                        S_LoadCacheEntryTexture(ref, tag);
                        result = ref.e->texture;
                    } break;
                    case S_CacheEntryKind_Sheet:
                    {
                        S_LoadCacheEntrySheet(ref, tag);
                        result = ref.e->sheet;
                    } break;
                    default: { Panic(Lit("Unknown sprite cache entry kind")); } break;
                }
            }
            else
            {
                /* Acquire cmd */
                S_PushLoadJob(ref, tag);
            }
        }
    }

    /* Spinlock until result is ready (entry may be loading on another
     * thread). NOTE(review): result still holds the loading placeholder on
     * this path unless this thread performed the load above — confirm
     * whether callers of the await variants re-read the entry. */
    if (await && state != S_CacheEntryState_Loaded)
    {
        while (Atomic32Fetch(&ref.e->state) != S_CacheEntryState_Loaded)
        {
            _mm_pause();
        }
    }

    return result;
}
|
|
|
|
////////////////////////////////
|
|
//~ Texture retrieval
|
|
|
|
/* Fetch the texture for `tag`, blocking until it is loaded. */
S_Texture *S_TextureFromTagAwait(S_Scope *scope, S_Tag tag)
{
    void *data = S_DataFromTag(scope, tag, S_CacheEntryKind_Texture, 1);
    return (S_Texture *)data;
}
|
|
|
|
/* Fetch the texture for `tag` without blocking; returns the loading
 * placeholder until the background load completes. */
S_Texture *S_TextureFromTagAsync(S_Scope *scope, S_Tag tag)
{
    void *data = S_DataFromTag(scope, tag, S_CacheEntryKind_Texture, 0);
    return (S_Texture *)data;
}
|
|
|
|
|
|
/* Kick off an async texture load for `tag`; the result is discarded. */
void S_PrefetchTextureFromTag(S_Scope *scope, S_Tag tag)
{
    (void)S_DataFromTag(scope, tag, S_CacheEntryKind_Texture, 0);
}
|
|
|
|
////////////////////////////////
|
|
//~ Sheet retrieval
|
|
|
|
/* Fetch the sheet for `tag`, blocking until it is loaded. */
S_Sheet *S_SheetFromTagAwait(S_Scope *scope, S_Tag tag)
{
    void *data = S_DataFromTag(scope, tag, S_CacheEntryKind_Sheet, 1);
    return (S_Sheet *)data;
}
|
|
|
|
/* Fetch the sheet for `tag` without blocking; returns the loading
 * placeholder until the background load completes. */
S_Sheet *S_SheetFromTagAsync(S_Scope *scope, S_Tag tag)
{
    void *data = S_DataFromTag(scope, tag, S_CacheEntryKind_Sheet, 0);
    return (S_Sheet *)data;
}
|
|
|
|
/* Kick off an async sheet load for `tag`; the result is discarded. */
void S_PrefetchSheetFromTag(S_Scope *scope, S_Tag tag)
{
    (void)S_DataFromTag(scope, tag, S_CacheEntryKind_Sheet, 0);
}
|
|
|
|
////////////////////////////////
|
|
//~ Sheet data
|
|
|
|
/* Look up a frame by index. Out-of-range indices yield a safe fallback
 * frame: index 0, short duration, fully clipped (draws nothing). */
S_Frame S_FrameFromIndex(S_Sheet *sheet, u32 index)
{
    if (index >= sheet->frames_count)
    {
        S_Frame fallback = ZI;
        fallback.index = 0;
        fallback.duration = 0.1;
        fallback.clip = AllClipped;
        return fallback;
    }
    return sheet->frames[index];
}
|
|
|
|
/* Find a named span via the sheet's span dictionary. Returns a zeroed span
 * when the sheet has no spans or the name is absent. */
S_Span S_SpanFromName(S_Sheet *sheet, String name)
{
    S_Span result = ZI;
    if (sheet->spans_count != 0)
    {
        u64 name_hash = HashFnv64(Fnv64Basis, name);
        S_Span *found = (S_Span *)DictValueFromHash(sheet->spans_dict, name_hash);
        if (found != 0)
        {
            result = *found;
        }
    }
    return result;
}
|
|
|
|
/* Returns first slice with name in frame */
|
|
/* Returns first slice with name in frame.
 * Falls back to the "pivot" slice when `name` is absent; if "pivot" itself
 * is absent, synthesizes a frame-center pivot. */
S_Slice S_SliceFromNameIndex(S_Sheet *sheet, String name, u32 frame_index)
{
    if (sheet->slice_groups_count > 0)
    {
        u64 hash = HashFnv64(Fnv64Basis, name);
        S_SheetSliceGroup *group = (S_SheetSliceGroup *)DictValueFromHash(sheet->slice_groups_dict, hash);
        if (group)
        {
            /* First slot of the requested frame's row in the 2D slice array. */
            return group->frame_slices[frame_index * group->per_frame_count];
        }
    }

    /* Return 'pivot' by default */
    S_Slice result = ZI;
    if (EqString(name, Lit("pivot")))
    {
        /* 'pivot' slice does not exist, return center.
         * NOTE(review): the dir defaults here (dir_px x-component = center x,
         * dir = (0, -0.5)) differ from the (0, -1) defaults used in
         * S_SheetFromAseResult — confirm whether that is intentional. */
        result.center = VEC2(0, 0);
        result.center_px = MulVec2(sheet->frame_size, 0.5f);
        result.dir_px = VEC2(result.center_px.x, 0);
        result.dir = VEC2(0, -0.5);
    }
    else
    {
        /* Recurse once for the pivot; terminates because the "pivot" branch
         * above does not recurse. */
        result = S_SliceFromNameIndex(sheet, Lit("pivot"), frame_index);
    }

    return result;
}
|
|
|
|
/* Returns all slices with name in frame */
|
|
/* Returns all slices with name in frame, as a view into the sheet's
 * slice-group storage (no copy). Empty array when the name is absent. */
S_SliceArray S_SlicesFromNameIndex(S_Sheet *sheet, String name, u32 frame_index)
{
    S_SliceArray result = ZI;
    if (sheet->slice_groups_count != 0)
    {
        u64 name_hash = HashFnv64(Fnv64Basis, name);
        S_SheetSliceGroup *group = (S_SheetSliceGroup *)DictValueFromHash(sheet->slice_groups_dict, name_hash);
        if (group != 0)
        {
            result.slices = &group->frame_slices[frame_index * group->per_frame_count];
            result.count = group->per_frame_count;
        }
    }
    return result;
}
|
|
|
|
////////////////////////////////
|
|
//~ Resource reloading
|
|
|
|
#if RESOURCE_RELOADING
|
|
|
|
/* Re-triggers loading for a cached sprite entry whose backing file changed.
 * Looks up the cache entry for (tag, kind) under the owning bin's lock; only
 * if an entry already exists does it acquire a scope reference and push a new
 * load job, so files that were never loaded are not loaded speculatively.
 * NOTE(review): S_EntryFromHashLocked takes &bin_lock — presumably it may
 * upgrade/release the lock internally; confirm before restructuring. */
void S_ReloadSpriteFromTag(S_Scope *scope, S_Tag tag, S_CacheEntryKind kind)
{
    S_SharedState *g = &S_shared_state;
    /* Cache entries are keyed by (tag hash, kind) combined into one hash. */
    S_Hash hash = S_CacheEntryFromTagHash(tag.hash, kind);
    S_CacheEntryBin *bin = &g->cache.bins[hash.v % S_CacheBinsCount];
    S_ScopeCacheEntryRef *existing_ref = 0;
    /* Shared lock is sufficient: we only query for an existing entry here. */
    Lock bin_lock = LockS(&bin->mutex);
    {
        existing_ref = S_EntryFromHashLocked(scope, hash, &bin_lock);
    }
    Unlock(&bin_lock);

    if (existing_ref)
    {
        P_LogInfoF("Sprite resource file \"%F\" has changed for sprite [%F].", FmtString(tag.path), FmtHex(hash.v));
        /* Final argument 1 presumably forces a (re)load request — TODO confirm
         * against S_EntryFromTag's parameter meaning. */
        S_ScopeCacheEntryRef *scope_ref = S_EntryFromTag(scope, tag, kind, 1);
        S_PushLoadJob(scope_ref->ref, tag);
    }
}
|
|
|
|
W_CallbackFuncDef(S_WatchSpriteCallback, name)
|
|
{
|
|
S_Scope *scope = S_BeginScope();
|
|
|
|
if (StringStartsWith(name, Lit("res/")))
|
|
{
|
|
name.len -= Lit("res/").len;
|
|
name.text += Lit("res/").len;
|
|
}
|
|
|
|
S_Tag tag = S_TagFromPath(name);
|
|
for (S_CacheEntryKind kind = 0; kind < S_CacheEntryKind_Count; ++kind)
|
|
{
|
|
S_ReloadSpriteFromTag(scope, tag, kind);
|
|
}
|
|
|
|
S_EndScope(scope);
|
|
}
|
|
|
|
#endif
|
|
|
|
////////////////////////////////
|
|
//~ Evictor
|
|
|
|
MergesortCompareFuncDef(S_EvictorSortCmp, arg_a, arg_b, _)
|
|
{
|
|
S_EvictorNode *a = arg_a;
|
|
S_EvictorNode *b = arg_b;
|
|
i32 a_cycle = a->last_ref_cycle;
|
|
i32 b_cycle = b->last_ref_cycle;
|
|
return (b_cycle > a_cycle) - (a_cycle > b_cycle);
|
|
}
|
|
|
|
/* NOTE:
 * A cache node is safe from eviction as long as either:
 * - Its bin mutex is held locked, or
 * - Any reference is held to the node (its refcount > 0)
 *
 * An attempt to evict a cache node occurs when its refcount is 0 and either:
 * - The cache is over its memory budget and the node's last reference is longer ago than the grace period, or
 * - Resource reloading is enabled and the node is out of date due to a change to its original resource file
 */
|
|
/* Background job: one long-running loop that periodically trims the sprite
 * cache. Each cycle:
 *   1. SCAN   — under each bin's shared lock, collect entries with refcount 0
 *               that are past the grace period (while over the budget
 *               threshold) or marked out-of-date by resource reloading.
 *   2. SORT   — order candidates by last_ref_cycle.
 *   3. REMOVE — under each bin's exclusive lock, re-validate each candidate
 *               and unlink it, until the cache is back under the budget
 *               target (out-of-date entries, marked -1, are always evicted).
 *   4. FREE   — release GPU resources and arenas, then return the entries to
 *               the shared free pool.
 * Sleeps on a condition variable between cycles so shutdown can wake it. */
JobDef(S_EvictorJob, UNUSED sig, UNUSED job_id)
{
    S_SharedState *g = &S_shared_state;
    b32 shutdown = 0;
    while (!shutdown)
    {
        {
            __profn("Sprite evictor cycle");
            TempArena scratch = BeginScratchNoConflict();
            u64 evict_array_count = 0;
            /* The candidate array grows implicitly: every PushStruct below
             * appends one contiguous S_EvictorNode after this base pointer
             * (verified by the Assert after the scan). */
            S_EvictorNode *evict_array = PushDry(scratch.arena, S_EvictorNode);
            {
                i32 cur_cycle = Atomic32Fetch(&g->evictor_cycle.v);

                /* Scan for evictable nodes */
                b32 cache_over_budget_threshold = Atomic64Fetch(&g->cache.memory_usage.v) > (i64)S_CacheMemoryBudgetThreshold;
                /* With resource reloading compiled in, scan every cycle so
                 * out-of-date entries are found even when under budget. */
                if (cache_over_budget_threshold || RESOURCE_RELOADING)
                {
                    __profn("Evictor scan");
                    for (u64 i = 0; i < S_CacheBinsCount; ++i)
                    {
                        S_CacheEntryBin *bin = &g->cache.bins[i];
                        /* Shared lock: the scan only reads bin list state. */
                        Lock bin_lock = LockS(&bin->mutex);
                        {
                            S_CacheEntry *n = bin->first;
                            while (n)
                            {
                                /* refcount and last_ref_cycle are packed into
                                 * one 64-bit atomic so both fields are read
                                 * as a consistent snapshot. */
                                u64 refcount_uncast = Atomic64Fetch(&n->refcount_struct.v);
                                S_Refcount refcount = *(S_Refcount *)&refcount_uncast;
                                if (refcount.count <= 0)
                                {
                                    /* Add node to evict list */
#if RESOURCE_RELOADING
                                    b32 is_out_of_date = Atomic32Fetch(&n->out_of_date);
#else
                                    b32 is_out_of_date = 0;
#endif
                                    b32 is_old = cache_over_budget_threshold && ((cur_cycle - refcount.last_ref_cycle) > S_EvictorGracePeriodCycles);
                                    if (is_old || is_out_of_date)
                                    {
                                        S_EvictorNode *en = PushStruct(scratch.arena, S_EvictorNode);
                                        en->cache_entry = n;
                                        en->cache_bin = bin;
                                        en->last_ref_cycle = refcount.last_ref_cycle;
                                        if (is_out_of_date)
                                        {
                                            /* -1 marks a forced eviction: the
                                             * removal pass evicts it even when
                                             * the cache is under budget. */
                                            en->last_ref_cycle = -1;
                                        }
                                        ++evict_array_count;
                                    }
                                }

                                n = n->next_in_bin;
                            }
                        }
                        Unlock(&bin_lock);
                    }
                }

                /* Scratch arena should only contain evict array at this point */
                Assert(((ArenaBase(scratch.arena) + scratch.arena->pos) - (sizeof(*evict_array) * evict_array_count)) == (u8 *)evict_array);

                /* Sort evict nodes */
                {
                    __profn("Evictor sort");
                    Mergesort(evict_array, evict_array_count, sizeof(*evict_array), S_EvictorSortCmp, 0);
                }

                /* Remove evictable nodes from cache until under budget */
                S_EvictorNode *first_evicted = 0;
                {
                    __profn("Evictor cache removal");
                    b32 stop_evicting = 0;
                    for (u64 i = 0; i < evict_array_count && !stop_evicting; ++i)
                    {
                        S_EvictorNode *en = &evict_array[i];
                        S_CacheEntryBin *bin = en->cache_bin;
                        S_CacheEntry *entry = en->cache_entry;
                        i32 last_ref_cycle = en->last_ref_cycle;
                        /* Re-sample usage each iteration: earlier evictions in
                         * this pass lower it toward the target. */
                        b32 cache_over_budget_target = Atomic64Fetch(&g->cache.memory_usage.v) > (i64)S_CacheMemoryBudgetTarget;
                        /* Exclusive lock: we may unlink the entry from the bin. */
                        Lock bin_lock = LockE(&bin->mutex);
                        {
                            u64 refcount_uncast = Atomic64Fetch(&entry->refcount_struct.v);
                            S_Refcount refcount = *(S_Refcount *)&refcount_uncast;
                            /* Re-validate the scan-time snapshot: skip if the
                             * entry is referenced again, or (for non-forced
                             * candidates) its last_ref_cycle moved since scan. */
                            if (refcount.count > 0 || (last_ref_cycle >= 0 && refcount.last_ref_cycle != en->last_ref_cycle))
                            {
                                /* Cache node has been referenced since scan, skip node. */
                            }
                            else if (cache_over_budget_target || last_ref_cycle < 0)
                            {
                                /* Remove from cache bin (doubly-linked list unlink,
                                 * patching bin->first / bin->last at the ends). */
                                S_CacheEntry *prev = entry->prev_in_bin;
                                S_CacheEntry *next = entry->next_in_bin;
                                if (prev)
                                {
                                    prev->next_in_bin = next;
                                }
                                else
                                {
                                    bin->first = next;
                                }
                                if (next)
                                {
                                    next->prev_in_bin = prev;
                                }
                                else
                                {
                                    bin->last = prev;
                                }

                                Atomic64FetchAdd(&g->cache.memory_usage.v, -((i64)entry->memory_usage));

                                /* Add to evicted list (LIFO singly-linked list) */
                                en->next_evicted = first_evicted;
                                first_evicted = en;
                            }
                            else
                            {
                                /* Cache is no longer over budget or force evicting, stop iteration */
                                stop_evicting = 1;
                            }
                        }
                        Unlock(&bin_lock);
                    }
                }

                if (first_evicted)
                {
                    /* Release evicted node memory */
                    {
                        __profn("Evictor memory release");
                        /* One global fence for the batch: GPU resources are
                         * released only once in-flight GPU work is done. */
                        GPU_Fence gpu_fence = GPU_GetGlobalFence();
                        for (S_EvictorNode *en = first_evicted; en; en = en->next_evicted)
                        {
                            S_CacheEntry *n = en->cache_entry;
                            if (n->kind == S_CacheEntryKind_Texture && n->texture->valid)
                            {
                                GPU_ReleaseResource(n->texture->gpu_resource, gpu_fence, GPU_ReleaseFlag_None);
                            }
                            ReleaseArena(n->arena);
                        }
                    }

                    /* Add evicted nodes to free list */
                    {
                        __profn("Evictor free list append");
                        Lock pool_lock = LockE(&g->cache.entry_pool_mutex);
                        for (S_EvictorNode *en = first_evicted; en; en = en->next_evicted)
                        {
                            S_CacheEntry *n = en->cache_entry;
                            n->next_free = g->cache.entry_pool_first_free;
                            g->cache.entry_pool_first_free = n;
                        }
                        Unlock(&pool_lock);
                    }
                }
            }
            /* Advance the cycle counter used for grace-period age checks. */
            Atomic32FetchAdd(&g->evictor_cycle.v, 1);
            EndScratch(scratch);
        }

        /* Evictor sleep */
        {
            Lock lock = LockE(&g->evictor_scheduler_mutex);
            {
                if (!g->evictor_scheduler_shutdown)
                {
                    /* Timed wait: wakes after the cycle interval, or earlier
                     * when shutdown signals the condition variable. */
                    YieldOnCvTime(&g->evictor_scheduler_shutdown_cv, &lock, S_EvictorCycleIntervalNs);
                }
                /* Read the flag under the mutex before deciding to loop. */
                shutdown = g->evictor_scheduler_shutdown;
            }
            Unlock(&lock);
        }
    }
}
|