/* power_play/src/scratch.h — per-thread scratch (temporary) arena support. */
#ifndef SCRATCH_H
#define SCRATCH_H
#include "arena.h"
#include "sys.h"
#define SCRATCH_ARENAS_PER_THREAD 2
#define SCRATCH_ARENA_RESERVE (GIGABYTE(64))
/* Per-thread bundle of scratch arenas, obtained via
 * `sys_thread_get_scratch_context()`. Field order is load-bearing; do not
 * reorder. */
struct scratch_context {
/* Two arenas so a caller holding one scratch arena can still borrow the
 * other (see `scratch_begin`'s conflict handling). */
struct arena arenas[SCRATCH_ARENAS_PER_THREAD];
#if RTC
/* Debug-only (RTC builds) bookkeeping used to detect missing or
 * out-of-order `scratch_end` calls. */
u64 next_scratch_id;            /* monotonically increasing id source */
u64 scratch_id_stack[16384];    /* ids of currently-open scratches, LIFO */
u64 scratch_id_stack_count;     /* number of live entries in the stack */
#endif
};
/* Begin a scratch scope. Any arena parameter in the calling function's
 * context must be passed in as a potential `conflict`: the caller's arena
 * may itself be one of this thread's scratch arenas (parameterized arenas
 * are often used to allocate persistent results for the caller), and we
 * must never hand the same arena out twice.
 *
 * Call `scratch_begin_no_conflict` instead when no arena in the current
 * context could possibly be a scratch arena. */
#define scratch_begin(c) _scratch_begin(c)
INLINE struct temp_arena _scratch_begin(struct arena *potential_conflict)
{
	/* The selection logic below is hard-coded for exactly two arenas */
	CT_ASSERT(SCRATCH_ARENAS_PER_THREAD == 2);
	/* A NULL conflict means `scratch_begin_no_conflict` should have
	 * been called instead */
	ASSERT(potential_conflict != NULL);
	struct scratch_context *ctx = sys_thread_get_scratch_context();
	/* Prefer arena 0; fall back to arena 1 if the caller already holds
	 * arena 0. The NULL re-check matters in builds where ASSERT
	 * compiles out. */
	struct arena *chosen = &ctx->arenas[0];
	if (potential_conflict != NULL && potential_conflict->base == chosen->base) {
		chosen = &ctx->arenas[1];
	}
	struct temp_arena temp = arena_push_temp(chosen);
#if RTC
	/* Debug builds: tag this scope with a fresh id and push it so
	 * scratch_end can verify strict LIFO ordering. */
	u64 depth = ctx->scratch_id_stack_count;
	if (depth >= ARRAY_COUNT(ctx->scratch_id_stack)) {
		sys_panic(STR("Max debug scratch depth reached"));
	}
	temp.scratch_id = ctx->next_scratch_id;
	ctx->next_scratch_id += 1;
	ctx->scratch_id_stack[depth] = temp.scratch_id;
	ctx->scratch_id_stack_count = depth + 1;
#endif
	return temp;
}
/* Wrapper around `_scratch_begin_no_conflict` that also declares a dummy
 * `arena` variable in a nested scope. If the caller already has a variable
 * named `arena`, the nested declaration shadows it, which surfaces as a
 * diagnostic — catching the obvious misuse where
 * `scratch_begin_no_conflict()` is called even though an `arena` exists in
 * the caller's context (`scratch_begin(arena)` should be used instead).
 * NOTE(review): shadowing is legal C, so this only *errors* under
 * -Werror=shadow (or equivalent) — confirm the build enables it.
 * The `(UNUSED)arena;` cast suppresses the unused-variable warning;
 * `UNUSED` is presumably `void` — defined in a header outside this view. */
#define scratch_begin_no_conflict() \
_scratch_begin_no_conflict(); \
do { \
struct arena *arena = NULL; \
(UNUSED)arena; \
} while (0)
/* Begin a scratch scope when the caller provably holds no scratch arena;
 * always hands out arena 0. Prefer the `scratch_begin_no_conflict` macro,
 * which adds a debug shadow-trap for misuse. */
INLINE struct temp_arena _scratch_begin_no_conflict(void)
{
	struct scratch_context *ctx = sys_thread_get_scratch_context();
	struct temp_arena temp = arena_push_temp(&ctx->arenas[0]);
#if RTC
	/* Debug builds: tag this scope with a fresh id and push it so
	 * scratch_end can verify strict LIFO ordering. */
	u64 depth = ctx->scratch_id_stack_count;
	if (depth >= ARRAY_COUNT(ctx->scratch_id_stack)) {
		sys_panic(STR("Max debug scratch depth reached"));
	}
	temp.scratch_id = ctx->next_scratch_id;
	ctx->next_scratch_id += 1;
	ctx->scratch_id_stack[depth] = temp.scratch_id;
	ctx->scratch_id_stack_count = depth + 1;
#endif
	return temp;
}
/* End a scratch scope opened by `scratch_begin`/`scratch_begin_no_conflict`,
 * releasing everything allocated within it. */
INLINE void scratch_end(struct temp_arena scratch_temp)
{
#if RTC
	struct scratch_context *ctx = sys_thread_get_scratch_context();
	/* NOTE(review): an empty stack is silently tolerated here — looks
	 * deliberate (ends without a tracked begin), so preserved as-is. */
	if (ctx->scratch_id_stack_count != 0) {
		ctx->scratch_id_stack_count -= 1;
		u64 expected_id = ctx->scratch_id_stack[ctx->scratch_id_stack_count];
		/* Catches a forgotten scratch_end: fires when a scratch arena
		 * is reset out of order, i.e. some *other* scratch (opened
		 * between this one's begin and end) was never ended. */
		ASSERT(scratch_temp.scratch_id == expected_id);
	}
#endif
	arena_pop_temp(scratch_temp);
}
/* Like `scratch_end`, but intended to also return unused committed pages
 * to the OS. The decommit step is currently disabled, so for now this is
 * equivalent to plain `scratch_end`. */
INLINE void scratch_end_and_decommit(struct temp_arena scratch_temp)
{
scratch_end(scratch_temp);
/* Disabled for now */
// arena_decommit_unused_blocks(scratch_temp.arena);
}
/* ========================== *
* Scratch context
* ========================== */
/* Build a fresh scratch context: reserves SCRATCH_ARENA_RESERVE of address
 * space per arena. Zero-initialization also resets the RTC debug state.
 * Pair with `scratch_context_release`. */
INLINE struct scratch_context scratch_context_alloc(void)
{
	struct scratch_context ctx = { 0 };
	u32 arena_count = ARRAY_COUNT(ctx.arenas);
	for (u32 idx = 0; idx < arena_count; ++idx) {
		ctx.arenas[idx] = arena_alloc(SCRATCH_ARENA_RESERVE);
	}
	return ctx;
}
INLINE void scratch_context_release(struct scratch_context *ctx)
{
for (u32 i = 0; i < ARRAY_COUNT(ctx->arenas); ++i) {
arena_release(&ctx->arenas[i]);
}
}
#endif