/* power_play/src/arena.h — arena (linear) allocator interface */
#ifndef ARENA_H
#define ARENA_H
#include "sys.h"
#include "memory.h"
/* Bytes reserved at the front of every arena for the arena struct itself;
 * usable memory starts at this offset (see arena_base). */
#define ARENA_HEADER_SIZE 256
/* NOTE(review): presumably the commit/decommit granularity used by the
 * allocator backend (see arena_decommit_unused_blocks) — confirm in arena.c */
#define ARENA_BLOCK_SIZE 16384
/* Typed push helpers: allocate one `type` (or `n` of them) at the natural
 * alignment of `type`. The `_no_zero` variants leave memory uninitialized. */
#define arena_push(a, type) ((type *)arena_push_bytes((a), sizeof(type), alignof(type)))
#define arena_push_no_zero(a, type) ((type *)arena_push_bytes_no_zero((a), sizeof(type), alignof(type)))
#define arena_push_array(a, type, n) ((type *)arena_push_bytes((a), (sizeof(type) * (n)), alignof(type)))
#define arena_push_array_no_zero(a, type, n) ((type *)arena_push_bytes_no_zero((a), (sizeof(type) * (n)), alignof(type)))
/* Typed pop helpers: remove the topmost `type` (or `n` of them) from the
 * arena, copying the removed bytes into `dst` before they are poisoned. */
#define arena_pop(a, type, dst) arena_pop_struct((a), sizeof(type), dst)
#define arena_pop_array(a, type, n, dst) arena_pop_struct((a), sizeof(type) * (n), dst)
/* Returns a pointer to where the next allocation would be (at alignment of type).
 * Equivalent to arena_push but without actually allocating anything or modifying the arena. */
#define arena_push_dry(a, type) (type *)(_arena_push_dry((a), alignof(type)))
/* A checkpoint of an arena's allocation position. Created by
 * arena_temp_begin and restored by arena_temp_end; also used as the
 * handle type for scratch arenas (scratch_begin / scratch_end). */
struct arena_temp {
	struct arena *arena; /* arena the checkpoint was taken from */
	u64 start_pos;       /* arena->pos at the time of the checkpoint */
#if RTC
	u64 scratch_id; /* NOTE(review): presumably a runtime-check id tying the
	                 * temp to a scratch acquisition — confirm in sys.h */
#endif
};
/* Create a new arena backed by `reserve` bytes of address space
 * (presumably reserved virtually and committed on demand — see arena.c). */
struct arena *arena_alloc(u64 reserve);
/* Destroy the arena and release all memory allocated from it. */
void arena_release(struct arena *arena);
/* Push `size` bytes at `align` alignment; contents are NOT zeroed. */
void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align);
/* Replace the contents of `dst` with a copy of `src`'s contents. */
void arena_copy_replace(struct arena *dst, struct arena *src);
/* Return committed-but-unused blocks beyond the current position to the OS. */
void arena_decommit_unused_blocks(struct arena *arena);
/* Toggle memory protection; pushes/pops assert against a readonly arena. */
void arena_set_readonly(struct arena *arena);
void arena_set_readwrite(struct arena *arena);
/* First usable byte of the arena: memory begins immediately after the
 * fixed-size header that holds the arena struct itself. */
INLINE u8 *arena_base(struct arena *arena)
{
	u8 *raw = (u8 *)arena;
	return raw + ARENA_HEADER_SIZE;
}
/* Push `size` bytes at `align` alignment and zero-fill them.
 * Zeroing wrapper over arena_push_bytes_no_zero. */
INLINE void *arena_push_bytes(struct arena *arena, u64 size, u64 align)
{
	void *mem = arena_push_bytes_no_zero(arena, size, align);
	MEMZERO(mem, size);
	return mem;
}
/* Rewind the arena to absolute position `pos`, poisoning the freed range
 * so stale reads trip ASan. `pos` must not be past the current position. */
INLINE void arena_pop_to(struct arena *arena, u64 pos)
{
	ASSERT(!arena->readonly);
	ASSERT(pos <= arena->pos);
	ASAN_POISON(arena_base(arena) + pos, arena->pos - pos);
	arena->pos = pos;
}
/* Pop the topmost `size` bytes off the arena, copying them into
 * `copy_dst` before the range is poisoned and the position rewound. */
INLINE void arena_pop_struct(struct arena *arena, u64 size, void *copy_dst)
{
	ASSERT(!arena->readonly);
	ASSERT(arena->pos >= size);
	u64 popped_pos = arena->pos - size;
	u8 *popped = arena_base(arena) + popped_pos;
	MEMCPY(copy_dst, popped, size);
	ASAN_POISON(popped, size);
	arena->pos = popped_pos;
}
/* Advance the arena position to a multiple of `align` (any positive value,
 * not just powers of two) by pushing uninitialized padding bytes.
 * Returns a pointer to where the padding begins, or to the current
 * position if it is already aligned. Zero alignment is invalid. */
INLINE void *arena_align(struct arena *arena, u64 align)
{
	ASSERT(!arena->readonly);
	if (align == 0) {
		/* 0 alignment */
		ASSERT(0);
		return (void *)(arena_base(arena) + arena->pos);
	}
	u64 remainder = arena->pos % align;
	if (remainder == 0) {
		/* Already aligned; nothing to push */
		return (void *)(arena_base(arena) + arena->pos);
	}
	u64 padding = align - remainder;
	return (void *)arena_push_array_no_zero(arena, u8, padding);
}
INLINE struct arena_temp arena_temp_begin(struct arena *arena)
{
struct arena_temp t = ZI;
t.arena = arena;
t.start_pos = arena->pos;
return t;
}
INLINE void arena_temp_end(struct arena_temp temp)
{
arena_pop_to(temp.arena, temp.start_pos);
}
/* Discard everything in the arena by rewinding to position zero. */
INLINE void arena_reset(struct arena *arena)
{
	arena_pop_to(arena, 0);
}
INLINE struct string arena_to_string(struct arena *arena)
{
struct string b;
b.text = arena_base(arena);
b.len = arena->pos;
return b;
}
/* Compute the address the next allocation at `align` alignment would
 * return, without modifying the arena (backing for arena_push_dry).
 * NOTE(review): unlike arena_push, this does not account for any
 * new-block/commit logic arena_push_bytes_no_zero may perform — confirm
 * against arena.c for positions near a block boundary. */
INLINE void *_arena_push_dry(struct arena *arena, u64 align)
{
	/* align == 0 would be a modulo-by-zero (UB); arena_align guards the
	 * same computation with an assert, so be consistent here. Callers
	 * going through arena_push_dry always pass alignof(type) >= 1. */
	ASSERT(align > 0);
	/* Round the current position up to the next multiple of `align` */
	u64 aligned_start_pos = arena->pos + (align - 1);
	aligned_start_pos -= aligned_start_pos % align;
	return (void *)(arena_base(arena) + aligned_start_pos);
}
/* ========================== *
* Scratch
* ========================== */
/* Any parameterized arenas in the caller's scope should be passed into this
* function as a potential "conflict". This is to prevent friction in case the
* passed arena is itself a scratch arena from another scope (since
* parameterized arenas are often used to allocate persistent results for the
* caller).
*
* Use `scratch_begin_no_conflict` instead if there is no arena in the current
* scope that could potentially be a scratch arena from another scope. */
#define scratch_begin(potential_conflict) _scratch_begin(potential_conflict)
INLINE struct arena_temp _scratch_begin(struct arena *potential_conflict)
{
/* This function is currently hard-coded to support 2 scratch arenas */
STATIC_ASSERT(SYS_SCRATCH_ARENAS_PER_CTX == 2);
/* Use `scratch_begin_no_conflict` if no conflicts are present */
ASSERT(potential_conflict != 0);
struct sys_scratch_ctx *ctx = sys_scratch_ctx_from_fiber_id(sys_current_fiber_id());
struct arena *scratch_arena = ctx->arenas[0];
if (potential_conflict && scratch_arena == potential_conflict) {
scratch_arena = ctx->arenas[1];
}
struct arena_temp temp = arena_temp_begin(scratch_arena);
return temp;
}
/* This macro declares an unused "arena" variable that will error if an existing "arena"
 * variable is present (due to shadowing). This is for catching obvious cases of
 * `scratch_begin_no_conflict` getting called when an `arena` variable already
 * exists in the caller's scope (`scratch_begin(arena)` should be called
 * instead).
 * NOTE(review): the shadowing only produces a diagnostic when the build treats
 * shadowing as an error (e.g. -Wshadow with -Werror) — confirm build flags.
 * NOTE(review): `(UNUSED)arena;` presumably casts to a project-defined UNUSED
 * type (akin to (void)) to silence unused-variable warnings — confirm its
 * definition in sys.h/memory.h. */
#define scratch_begin_no_conflict() \
	_scratch_begin_no_conflict(); \
	do { \
		u8 arena = 0; \
		(UNUSED)arena; \
	} while (0)
/* Acquire the current fiber's first scratch arena when the caller has no
 * arena in scope that could conflict (backing for the
 * scratch_begin_no_conflict macro). Release with scratch_end. */
INLINE struct arena_temp _scratch_begin_no_conflict(void)
{
	struct sys_scratch_ctx *ctx = sys_scratch_ctx_from_fiber_id(sys_current_fiber_id());
	return arena_temp_begin(ctx->arenas[0]);
}
INLINE void scratch_end(struct arena_temp scratch_temp)
{
arena_temp_end(scratch_temp);
}
#endif