power_play/src/base/base_arena.h
2025-07-29 20:01:35 -05:00

223 lines
5.8 KiB
C

////////////////////////////////
//~ Arena types
/* Bytes set aside at the start of an arena's memory region for the Arena
 * header itself; usable memory begins at this offset (see ArenaBase). */
#define ArenaHeaderSize 256
/* NOTE(review): presumably the commit granularity used by AllocArena /
 * PushBytesNoZero — not referenced in this header; confirm in the .c. */
#define ArenaBlockSize 16384
/* Linear (bump) allocator header. The header sits at the base of the
 * reserved region; allocations are served from the bytes after it. */
Struct(Arena)
{
u64 pos;       /* current allocation offset, relative to ArenaBase(arena) */
u64 committed; /* NOTE(review): not read in this header — presumably bytes committed so far; confirm in the .c */
u64 reserved;  /* total bytes of address space reserved for this arena */
#if RTC
b32 readonly;  /* runtime write-protection flag (see SetArenaReadonly); only exists in RTC builds */
#endif
};
/* Marks a point in an arena so that everything pushed after the mark can be
 * released in one step (see BeginTempArena / EndTempArena). */
Struct(TempArena) {
Arena *arena;  /* arena the mark was taken on */
u64 start_pos; /* arena->pos captured at BeginTempArena time */
#if RTC
u64 scratch_id; /* NOTE(review): never written in this header — presumably set by scratch runtime checks; confirm in the .c */
#endif
};
////////////////////////////////
//~ Scratch types
/* Two scratch arenas per context so that a caller-supplied arena which is
 * itself a scratch arena can be dodged (see _BeginScratch). */
#define ScratchArenasPerCtx 2
/* Per-fiber set of scratch arenas, lazily allocated on first use
 * (see ScratchCtxFromFiberId). */
Struct(ScratchCtx)
{
Arena *arenas[ScratchArenasPerCtx];
};
////////////////////////////////
//~ Shared state
/* Process-wide table of scratch contexts, one slot per fiber, indexed by
 * fiber id (see ScratchCtxFromFiberId). Defined in the corresponding .c. */
Struct(SharedScratchCtx)
{
ScratchCtx scratch_contexts[MAX_FIBERS];
};
extern SharedScratchCtx shared_scratch_ctx;
////////////////////////////////
//~ Arena push/pop
/* Typed wrappers over PushBytes / PushBytesNoZero. The NoZero variants skip
 * the MEMZERO of the returned memory; all push at alignof(type). */
#define PushStruct(a, type) ((type *)PushBytes((a), sizeof(type), alignof(type)))
#define PushStructNoZero(a, type) ((type *)PushBytesNoZero((a), sizeof(type), alignof(type)))
#define PushArray(a, type, n) ((type *)PushBytes((a), (sizeof(type) * (n)), alignof(type)))
#define PushArrayNoZero(a, type, n) ((type *)PushBytesNoZero((a), (sizeof(type) * (n)), alignof(type)))
/* Pops copy the released bytes out into `dst` before poisoning them. */
#define PopStruct(a, type, dst) PopBytes((a), sizeof(type), dst)
#define PopArray(a, type, n, dst) PopBytes((a), sizeof(type) * (n), dst)
/* Returns a pointer to where the next allocation would be (at alignment of type).
 * Equivalent to PushStruct but without actually allocating anything or modifying the arena. */
#define PushDry(a, type) (type *)(_PushDry((a), alignof(type)))
/* Core allocation entry point; defined in the corresponding .c file. */
void *PushBytesNoZero(Arena *arena, u64 size, u64 align);
/* Pushes `size` bytes at `align` alignment and clears them to zero before
 * returning. Same contract as PushBytesNoZero otherwise. */
INLINE void *PushBytes(Arena *arena, u64 size, u64 align)
{
	void *result = PushBytesNoZero(arena, size, align);
	MEMZERO(result, size);
	return result;
}
/* The arena's usable memory begins ArenaHeaderSize bytes past the Arena
 * header, which sits at the very start of the reservation. */
INLINE u8 *ArenaBase(Arena *arena)
{
	u8 *header = (u8 *)arena;
	return header + ArenaHeaderSize;
}
/* Rewinds the arena to an earlier position `pos` (a value previously read
 * from arena->pos). The released range is ASan-poisoned so stale reads trap.
 * NOTE(review): arena->readonly only exists when RTC is defined (see Arena);
 * presumably ASSERT compiles away in non-RTC builds — confirm. */
INLINE void PopTo(Arena *arena, u64 pos)
{
ASSERT(arena->pos >= pos);
ASSERT(!arena->readonly);
/* Poison before updating pos so the full popped range [pos, old pos) is covered. */
ASAN_POISON(ArenaBase(arena) + pos, arena->pos - pos);
arena->pos = pos;
}
/* Pops `size` bytes off the top of the arena, copying them into `copy_dst`
 * before they are poisoned. `copy_dst` must be at least `size` bytes.
 * NOTE(review): arena->readonly only exists when RTC is defined — see PopTo. */
INLINE void PopBytes(Arena *arena, u64 size, void *copy_dst)
{
ASSERT(arena->pos >= size);
ASSERT(!arena->readonly);
u64 new_pos = arena->pos - size;
/* Copy out first: poisoning below makes the source range unreadable under ASan. */
void *src = (void *)(ArenaBase(arena) + new_pos);
MEMCPY(copy_dst, src, size);
ASAN_POISON(ArenaBase(arena) + new_pos, arena->pos - new_pos);
arena->pos = new_pos;
}
/* Returns the address the next allocation would land at when rounded up to
 * `align`, without modifying the arena. `align` must be non-zero (callers go
 * through PushDry, which passes alignof(type)). */
INLINE void *_PushDry(Arena *arena, u64 align)
{
	/* Round pos up to the next multiple of align. */
	u64 rem = arena->pos % align;
	u64 aligned_pos = (rem == 0) ? arena->pos : arena->pos + (align - rem);
	return (void *)(ArenaBase(arena) + aligned_pos);
}
////////////////////////////////
//~ Arena management
/* Arena lifecycle; all defined in the corresponding .c file. */
Arena *AllocArena(u64 reserve);   /* reserves `reserve` bytes; returns the header at the base of the reservation */
void ReleaseArena(Arena *arena);  /* releases the whole reservation; arena pointer is invalid afterwards */
void CopyArena(Arena *dst, Arena *src);
void ShrinkArena(Arena *arena);   /* NOTE(review): presumably decommits unused pages — confirm in the .c */
void SetArenaReadonly(Arena *arena);  /* pairs with the RTC `readonly` flag checked by PopTo/PopBytes/AlignArena */
void SetArenaReadWrite(Arena *arena);
/* Pushes just enough padding bytes so arena->pos becomes a multiple of
 * `align`. Returns the padding start when padding was needed, otherwise the
 * (already aligned) current position. `align` must be non-zero. */
INLINE void *AlignArena(Arena *arena, u64 align)
{
	ASSERT(!arena->readonly);
	if (align == 0) {
		/* 0 alignment is a caller bug */
		ASSERT(0);
		return (void *)(ArenaBase(arena) + arena->pos);
	}
	u64 rem = arena->pos % align;
	if (rem != 0) {
		/* Pad up to the next multiple of align. */
		return (void *)PushArrayNoZero(arena, u8, align - rem);
	}
	return (void *)(ArenaBase(arena) + arena->pos);
}
/* Pops everything, returning the arena to an empty state (pos == 0);
 * the whole used range gets ASan-poisoned by PopTo. */
INLINE void ResetArena(Arena *arena)
{
PopTo(arena, 0);
}
////////////////////////////////
//~ Temp arena
/* Captures the arena's current position so EndTempArena can pop back to it.
 * Does not allocate or otherwise modify the arena. */
INLINE TempArena BeginTempArena(Arena *arena)
{
	TempArena temp = ZI;
	temp.arena = arena;
	temp.start_pos = arena->pos;
	return temp;
}
/* Pops the arena back to where it was when `temp` was taken, releasing
 * everything pushed since BeginTempArena. */
INLINE void EndTempArena(TempArena temp)
{
PopTo(temp.arena, temp.start_pos);
}
////////////////////////////////
//~ Scratch
/* Returns the scratch-arena context for the given fiber, lazily allocating
 * its arenas on first use.
 * fiber_id indexes shared_scratch_ctx.scratch_contexts and must lie in
 * [0, MAX_FIBERS). NOTE(review): presumably each fiber only ever touches its
 * own slot, so the lazy init needs no synchronization — confirm. */
INLINE ScratchCtx *ScratchCtxFromFiberId(i16 fiber_id)
{
	/* Guard the table index: a negative or stale fiber id would otherwise
	 * read/write out of bounds of scratch_contexts. */
	ASSERT(fiber_id >= 0 && fiber_id < MAX_FIBERS);
	SharedScratchCtx *shared = &shared_scratch_ctx;
	ScratchCtx *ctx = &shared->scratch_contexts[fiber_id];
	/* arenas[0] doubles as the "initialized" flag for the whole slot. */
	if (!ctx->arenas[0]) {
		for (i32 i = 0; i < (i32)countof(ctx->arenas); ++i) {
			ctx->arenas[i] = AllocArena(GIBI(64));
		}
	}
	return ctx;
}
/* Any parameterized arenas in the caller's scope should be passed into this
* function as a potential "conflict". This is to prevent friction in case the
* passed arena is itself a scratch arena from another scope (since
* parameterized arenas are often used to allocate persistent results for the
* caller).
*
* Use `BeginScratchNoConflict` instead if there is no arena in the current
* scope that could potentially be a scratch arena from another scope. */
#define BeginScratch(potential_conflict) _BeginScratch(potential_conflict)
/* Begins a scratch temp arena for the current fiber, avoiding
 * `potential_conflict` if it happens to be one of the scratch arenas. */
INLINE TempArena _BeginScratch(Arena *potential_conflict)
{
	/* This function is currently hard-coded to support 2 scratch arenas */
	STATIC_ASSERT(ScratchArenasPerCtx == 2);
	/* Use `BeginScratchNoConflict` if no conflicts are present */
	ASSERT(potential_conflict != 0);
	ScratchCtx *ctx = ScratchCtxFromFiberId(FiberId());
	/* Default to arena 0; fall back to arena 1 only if 0 is the conflict. */
	Arena *chosen = ctx->arenas[0];
	if (potential_conflict != 0 && chosen == potential_conflict) {
		chosen = ctx->arenas[1];
	}
	return BeginTempArena(chosen);
}
/* This macro declares an unused "arena" variable that will error if an existing "arena"
* variable is present (due to shadowing). This is for catching obvious cases of
* `BeginScratchNoConflict` getting called when an `arena` variable already
* exists in the caller's scope (`BeginScratch(arena)` should be called
* instead). */
/* NOTE: this expands to TWO statements — the call, then the shadow-check
 * do/while — so it only works as a full statement, e.g.
 *   TempArena t = BeginScratchNoConflict();
 * It cannot be used inside an expression or as an unbraced if/else body. */
#define BeginScratchNoConflict() \
_BeginScratchNoConflict(); \
do { \
u8 arena = 0; \
(UNUSED)arena; \
} while (0)
/* Begins a scratch temp arena on the fiber's first scratch arena; the caller
 * has asserted (by choosing this entry point) that no arena in scope could be
 * a scratch arena from another scope. */
INLINE TempArena _BeginScratchNoConflict(void)
{
ScratchCtx *ctx = ScratchCtxFromFiberId(FiberId());
Arena *scratch_arena = ctx->arenas[0];
TempArena temp = BeginTempArena(scratch_arena);
return temp;
}
/* Releases a scratch temp arena obtained from BeginScratch /
 * BeginScratchNoConflict; pops the scratch arena back to its mark. */
INLINE void EndScratch(TempArena scratch_temp)
{
EndTempArena(scratch_temp);
}