store arena in its own committed memory

jacob 2025-06-17 22:21:58 -05:00
parent 1f7afffe32
commit cfd8fb5f21
33 changed files with 273 additions and 258 deletions
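
`arena_alloc` now reserves HEADER_BLOCK_SIZE extra bytes, commits the header together with the first data block, and places the `struct arena` itself at the end of that header block, so the allocator hands back a pointer into its own mapping instead of a struct by value. Every call site switches from `struct arena` to `struct arena *` accordingly. A minimal before/after sketch of the API change (`struct foo` and the reserve size are placeholders, not from this codebase):

/* Before: arena returned by value, callers take its address */
struct arena a = arena_alloc(GIGABYTE(1));
struct foo *p = arena_push(&a, struct foo);
arena_release(&a);

/* After: the arena struct lives inside its own committed header block */
struct arena *a = arena_alloc(GIGABYTE(1));
struct foo *p = arena_push(a, struct foo);
arena_release(a); /* frees the whole reservation, arena struct included */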

View File

@ -32,13 +32,13 @@ struct exit_callback {
};
GLOBAL struct {
struct arena arena;
struct arena *arena;
struct string write_path;
struct sync_flag exit_sf;
/* Exit callbacks */
struct sys_mutex exit_callbacks_mutex;
struct arena exit_callbacks_arena;
struct arena *exit_callbacks_arena;
struct exit_callback *exit_callbacks_head;
} G = ZI, DEBUG_ALIAS(G, G_app);
@ -107,7 +107,7 @@ INTERNAL struct sys_window_settings default_window_settings(struct sys_window *w
void app_register_exit_callback(app_exit_callback_func *func)
{
struct sys_lock lock = sys_mutex_lock_e(&G.exit_callbacks_mutex);
struct exit_callback *callback = arena_push(&G.exit_callbacks_arena, struct exit_callback);
struct exit_callback *callback = arena_push(G.exit_callbacks_arena, struct exit_callback);
callback->func = func;
callback->next = G.exit_callbacks_head;
G.exit_callbacks_head = callback;
@ -255,7 +255,7 @@ void app_entry_point(struct string args_str)
#endif
}
G.write_path = initialize_write_directory(&G.arena, LIT(WRITE_DIR));
G.write_path = initialize_write_directory(G.arena, LIT(WRITE_DIR));
/* Startup logging */
{

View File

@ -6,57 +6,64 @@
#include "gstat.h"
/* Arbitrary block size */
#define ARENA_BLOCK_SIZE 4096
#define HEADER_BLOCK_SIZE 4096
#define DATA_BLOCK_SIZE 4096
/* NOTE: Application will exit if arena fails to reserve or commit initial
* memory. */
struct arena arena_alloc(u64 reserve)
struct arena *arena_alloc(u64 reserve)
{
__prof;
struct arena arena = ZI;
/* Round up to nearest block size */
u64 block_remainder = reserve % ARENA_BLOCK_SIZE;
u64 block_remainder = reserve % DATA_BLOCK_SIZE;
if (block_remainder > 0) {
reserve += ARENA_BLOCK_SIZE - block_remainder;
reserve += DATA_BLOCK_SIZE - block_remainder;
}
arena.base = sys_memory_reserve(reserve);
if (!arena.base) {
u8 *reserve_base = sys_memory_reserve(reserve + HEADER_BLOCK_SIZE);
if (!reserve_base) {
/* Hard fail on memory reserve failure for now */
sys_panic(LIT("Failed to reserve memory"));
}
arena.reserved = reserve;
gstat_add(GSTAT_MEMORY_RESERVED, arena.reserved);
u64 reserved = reserve;
gstat_add(GSTAT_MEMORY_RESERVED, reserve + HEADER_BLOCK_SIZE);
/* Commit one block to start with */
arena.base = sys_memory_commit(arena.base, ARENA_BLOCK_SIZE);
if (!arena.base) {
/* Commit header block */
u8 *base = sys_memory_commit(reserve_base, HEADER_BLOCK_SIZE + DATA_BLOCK_SIZE);
if (!base) {
/* Hard fail on commit failure */
sys_panic(LIT("Failed to commit initial memory block: System may be out of memory"));
}
arena.committed = ARENA_BLOCK_SIZE;
gstat_add(GSTAT_MEMORY_COMMITTED, arena.committed);
__profalloc(arena.base, ARENA_BLOCK_SIZE);
ASAN_POISON(arena.base, ARENA_BLOCK_SIZE);
base += HEADER_BLOCK_SIZE;
ASAN_POISON(base - HEADER_BLOCK_SIZE, HEADER_BLOCK_SIZE - sizeof(struct arena));
ASAN_POISON(base, DATA_BLOCK_SIZE);
/* Arena should be 64k aligned */
ASSERT(((u64)arena.base & 0xFFFF) == 0);
ASSERT(((u64)base & 0xFFF) == 0); /* Base should be 4k aligned */
CT_ASSERT(sizeof(struct arena) <= HEADER_BLOCK_SIZE); /* First block must fit arena */
gstat_add(GSTAT_MEMORY_COMMITTED, HEADER_BLOCK_SIZE + DATA_BLOCK_SIZE);
__profalloc(reserve_base, HEADER_BLOCK_SIZE + DATA_BLOCK_SIZE);
/* Create arena struct at end of header block */
struct arena *arena = (struct arena *)(base - sizeof(struct arena));
MEMZERO_STRUCT(arena);
gstat_add(GSTAT_NUM_ARENAS, 1);
arena->committed = DATA_BLOCK_SIZE;
arena->reserve_base = reserve_base;
arena->base = base;
arena->reserved = reserved;
return arena;
}
void arena_release(struct arena *arena)
{
ASAN_UNPOISON(arena->base, arena->committed);
ASAN_UNPOISON(arena->reserve_base, arena->committed + HEADER_BLOCK_SIZE);
__prof;
__proffree(arena->base);
__proffree(arena->reserve_base);
gstat_sub(GSTAT_MEMORY_COMMITTED, arena->committed);
gstat_sub(GSTAT_MEMORY_RESERVED, arena->reserved);
gstat_sub(GSTAT_NUM_ARENAS, 1);
sys_memory_release(arena->base);
sys_memory_release(arena->reserve_base);
}
/* NOTE: Application will exit if arena fails to commit memory */
@ -76,8 +83,8 @@ void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align)
if (new_pos > arena->committed) {
__profscope(_arena_push_bytes_COMMIT);
/* Commit new block(s) */
u64 blocks_needed = (new_pos - arena->committed + ARENA_BLOCK_SIZE - 1) / ARENA_BLOCK_SIZE;
u64 commit_bytes = blocks_needed * ARENA_BLOCK_SIZE;
u64 blocks_needed = (new_pos - arena->committed + DATA_BLOCK_SIZE - 1) / DATA_BLOCK_SIZE;
u64 commit_bytes = blocks_needed * DATA_BLOCK_SIZE;
u64 new_capacity = arena->committed + commit_bytes;
if (new_capacity > arena->reserved) {
/* Hard fail if we overflow reserved memory for now */
@ -90,8 +97,8 @@ void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align)
}
arena->committed += commit_bytes;
gstat_add(GSTAT_MEMORY_COMMITTED, commit_bytes);
__proffree(arena->base);
__profalloc(arena->base, arena->committed + commit_bytes);
__proffree(arena->reserve_base);
__profalloc(arena->reserve_base, arena->committed + commit_bytes + HEADER_BLOCK_SIZE);
ASAN_POISON(commit_address, commit_bytes);
}
start = arena->base + aligned_start_pos;
@ -117,8 +124,9 @@ void arena_copy_replace(struct arena *dest, struct arena *src)
void arena_decommit_unused_blocks(struct arena *arena)
{
#if 0
ASSERT(!arena->readonly);
u64 next_block_pos = ARENA_BLOCK_SIZE * ((arena->pos + (ARENA_BLOCK_SIZE - 1)) / ARENA_BLOCK_SIZE);
u64 next_block_pos = DATA_BLOCK_SIZE * ((arena->pos + (DATA_BLOCK_SIZE - 1)) / DATA_BLOCK_SIZE);
if (arena->committed > next_block_pos) {
u8 *decommit_start = arena->base + next_block_pos;
u64 decommit_size = (arena->base + arena->committed) - decommit_start;
@ -126,6 +134,11 @@ void arena_decommit_unused_blocks(struct arena *arena)
arena->committed = next_block_pos;
gstat_sub(GSTAT_MEMORY_COMMITTED, decommit_size);
}
#else
/* TODO */
ASSERT(false); /* Not implemented */
(UNUSED)arena;
#endif
}
void arena_set_readonly(struct arena *arena)
@ -133,12 +146,12 @@ void arena_set_readonly(struct arena *arena)
#if RTC
arena->readonly = true;
#endif
sys_memory_set_committed_readonly(arena->base, arena->committed);
sys_memory_set_committed_readonly(arena->reserve_base, arena->committed + HEADER_BLOCK_SIZE);
}
void arena_set_readwrite(struct arena *arena)
{
sys_memory_set_committed_readwrite(arena->base, arena->committed);
sys_memory_set_committed_readwrite(arena->reserve_base, arena->committed + HEADER_BLOCK_SIZE);
#if RTC
arena->readonly = false;
#endif
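
The layout produced by the new `arena_alloc`, reconstructed from the code above (both block sizes are 4096 bytes, per the defines at the top of the file):

/*
 * reserve_base                  base = reserve_base + HEADER_BLOCK_SIZE
 * |                             |
 * +---- header block ----------+---- data blocks -----------------------+
 * | poisoned | struct arena    | DATA_BLOCK_SIZE committed up front;    |
 * | padding  | ends at `base`  | arena_push_bytes_no_zero commits more  |
 * |          |                 | DATA_BLOCK_SIZE blocks on demand       |
 * +----------+-----------------+----------------------------------------+
 */

Because the header lives in the same mapping, `arena_set_readonly` and `arena_set_readwrite` now protect `committed + HEADER_BLOCK_SIZE` bytes starting at `reserve_base`, so the arena bookkeeping is write-protected together with its data.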

View File

@ -25,7 +25,7 @@ struct arena_temp {
#endif
};
struct arena arena_alloc(u64 reserve);
struct arena *arena_alloc(u64 reserve);
void arena_release(struct arena *arena);
void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align);
void arena_copy_replace(struct arena *dest, struct arena *src);

View File

@ -21,7 +21,7 @@ GLOBAL struct {
u64 num_assets;
struct sys_mutex store_mutex;
struct arena store_arena;
struct arena *store_arena;
#if RTC
/* Array of len `num_assets` pointing into populated entries of `lookup`. */
@ -235,7 +235,7 @@ struct asset_cache_store asset_cache_store_open(void)
struct sys_lock lock = sys_mutex_lock_e(&G.store_mutex);
struct asset_cache_store store = {
.lock = lock,
.arena = &G.store_arena
.arena = G.store_arena
};
return store;
}

View File

@ -99,7 +99,7 @@ void bitbuff_release(struct bitbuff *bb)
{
/* Only arena bitbuffs need to be released */
if (bb->is_backed_by_arena) {
arena_release(&bb->arena);
arena_release(bb->arena);
}
}
@ -119,7 +119,7 @@ struct bitbuff_writer bw_from_bitbuff(struct bitbuff *bb)
struct bitbuff_writer res = ZI;
res.bb = bb;
if (bb->is_backed_by_arena) {
res.base = bb->arena.base;
res.base = bb->arena->base;
} else {
res.base = bb->fixed_buffer.text;
}
@ -178,7 +178,7 @@ b32 bw_check_overflow_bits(struct bitbuff_writer *bw, u64 num_bits)
} else {
u64 bytes_needed = (bw->cur_bit + num_bits + 7) >> 3;
if (bb->is_backed_by_arena) {
struct arena *arena = &bb->arena;
struct arena *arena = bb->arena;
if (bytes_needed >= arena->pos) {
/* Grow arena */
u64 push_size = (((bytes_needed - arena->pos) / WRITE_OVERFLOW_ARENA_PUSH_SIZE) + 1) * WRITE_OVERFLOW_ARENA_PUSH_SIZE;
@ -382,7 +382,7 @@ struct bitbuff_reader br_from_bitbuff(struct bitbuff *bb)
res.base = bb->fixed_buffer.text;
res.base_len = bb->fixed_buffer.len;
} else {
struct arena *arena = &bb->arena;
struct arena *arena = bb->arena;
res.base = arena->base;
res.base_len = arena->pos;
}

View File

@ -9,7 +9,7 @@ struct bitbuff {
b32 is_backed_by_arena;
/* If `is_backed_by_arena` is true, this dynamically-sized arena will be used for reading & writing (meaning writing cannot overflow) */
struct arena arena;
struct arena *arena;
/* If `is_backed_by_arena` is false, this fixed-size buffer will be used for reading & writing */
struct string fixed_buffer;

View File

@ -10,13 +10,13 @@
struct buddy_ctx *buddy_ctx_alloc(u64 reserve)
{
/* TODO: Determine meta reserve dynamically */
struct arena meta_arena = arena_alloc(GIGABYTE(64));
struct buddy_ctx *ctx = arena_push(&meta_arena, struct buddy_ctx);
struct arena *meta_arena = arena_alloc(GIGABYTE(64));
struct buddy_ctx *ctx = arena_push(meta_arena, struct buddy_ctx);
ctx->meta_arena = meta_arena;
ctx->data_arena = arena_alloc(reserve);
/* TODO: Minimum block size */
ctx->levels = arena_push_array(&ctx->meta_arena, struct buddy_level, 64);
ctx->levels = arena_push_array(ctx->meta_arena, struct buddy_level, 64);
for (u64 i = 0; i < 64; ++i) {
struct buddy_level *level = &ctx->levels[i];
level->ctx = ctx;
@ -29,8 +29,8 @@ struct buddy_ctx *buddy_ctx_alloc(u64 reserve)
void buddy_ctx_release(struct buddy_ctx *ctx)
{
arena_release(&ctx->data_arena);
arena_release(&ctx->meta_arena);
arena_release(ctx->data_arena);
arena_release(ctx->meta_arena);
}
/* ========================== *
@ -44,7 +44,7 @@ INTERNAL struct buddy_block *buddy_block_alloc_internal(struct buddy_ctx *ctx)
block = ctx->first_free_block;
ctx->first_free_block = block->next;
} else {
block = arena_push_no_zero(&ctx->meta_arena, struct buddy_block);
block = arena_push_no_zero(ctx->meta_arena, struct buddy_block);
}
MEMZERO_STRUCT(block);
return block;
@ -111,7 +111,7 @@ INTERNAL struct buddy_block *buddy_block_get_unused(struct buddy_ctx *ctx, struc
right->sibling = left;
block = left;
} else {
struct arena *arena = &ctx->data_arena;
struct arena *arena = ctx->data_arena;
/* Grow arena */
i64 level_commit_diff = (level->size * 2) - arena->pos;

View File

@ -23,8 +23,8 @@ struct buddy_level {
};
struct buddy_ctx {
struct arena meta_arena;
struct arena data_arena;
struct arena *meta_arena;
struct arena *data_arena;
struct buddy_level *levels;
struct buddy_block *first_free_block;
};

View File

@ -407,6 +407,7 @@ struct arena {
u64 committed;
u64 reserved;
u8 *base;
u8 *reserve_base;
#if RTC
b32 readonly;
#endif

View File

@ -80,7 +80,7 @@
#define DX12_TEST 1
#define DX12_TEST 0

View File

@ -25,7 +25,7 @@ struct font_task_params {
struct font_task_params_store {
struct font_task_params *head_free;
struct arena arena;
struct arena *arena;
struct sys_mutex mutex;
};
@ -72,7 +72,7 @@ INTERNAL struct font_task_params *font_task_params_alloc(void)
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push(&G.params.arena, struct font_task_params);
p = arena_push(G.params.arena, struct font_task_params);
}
sys_mutex_unlock(&lock);
}

View File

@ -99,7 +99,7 @@ struct dx11_buffer {
D3D11_BUFFER_DESC desc;
/* Cpu buffer */
struct arena cpu_buffer_arena;
struct arena *cpu_buffer_arena;
u8 *cpu_buffer;
/* Gpu buffer */
@ -161,12 +161,12 @@ struct dx11_plan {
struct dx11_handle_header header;
/* Commands w/ data still in cpu memory */
struct arena cpu_cmds_arena;
struct arena *cpu_cmds_arena;
struct dx11_cmd *cpu_first_cmd;
struct dx11_cmd *cpu_last_cmd;
/* Commands w/ buffer data submitted to video memory */
struct arena gpu_cmds_arena;
struct arena *gpu_cmds_arena;
struct dx11_cmd *gpu_first_cmd;
struct dx11_cmd *gpu_last_cmd;
@ -212,7 +212,7 @@ struct dx11_shader_desc {
/* Internal */
#if RESOURCE_RELOADING
struct arena includes_arena;
struct arena *includes_arena;
struct sys_mutex includes_mutex;
struct dict includes_dict;
struct atomic_i32 is_dirty;
@ -220,7 +220,7 @@ struct dx11_shader_desc {
};
GLOBAL struct {
struct arena arena;
struct arena *arena;
#if PROFILING
struct __prof_dx11_ctx *profiling_ctx;
@ -242,22 +242,22 @@ GLOBAL struct {
/* Buffer pool */
struct sys_mutex buffers_mutex;
struct arena buffers_arena;
struct arena *buffers_arena;
struct dx11_buffer *first_free_buffer;
/* Plan pool */
struct sys_mutex plans_mutex;
struct arena plans_arena;
struct arena *plans_arena;
struct dx11_plan *first_free_plan;
/* Dispatch state pool */
struct sys_mutex dispatch_states_mutex;
struct arena dispatch_states_arena;
struct arena *dispatch_states_arena;
struct dx11_dispatch_state *first_free_dispatch_state;
/* Texture pool */
struct sys_mutex textures_mutex;
struct arena textures_arena;
struct arena *textures_arena;
struct dx11_texture *first_free_texture;
/* Shaders */
@ -686,7 +686,7 @@ INTERNAL void init_shader_table(void)
struct dx11_shader_desc *desc = &G.shader_info[i];
desc->includes_arena = arena_alloc(MEGABYTE(8));
desc->includes_mutex = sys_mutex_alloc();
desc->includes_dict = dict_init(&desc->includes_arena, 64);
desc->includes_dict = dict_init(desc->includes_arena, 64);
}
#endif
}
@ -703,7 +703,7 @@ INTERNAL void shader_add_include(struct dx11_shader_desc *desc, struct string in
struct dict *dict = &desc->includes_dict;
struct sys_lock lock = sys_mutex_lock_e(&desc->includes_mutex);
{
dict_set(&desc->includes_arena, dict, hash, 1);
dict_set(desc->includes_arena, dict, hash, 1);
}
sys_mutex_unlock(&lock);
}
@ -1044,7 +1044,7 @@ INTERNAL struct dx11_texture *dx11_texture_alloc(enum DXGI_FORMAT format, u32 fl
t = G.first_free_texture;
G.first_free_texture = t->next_free;
} else {
t = arena_push_no_zero(&G.textures_arena, struct dx11_texture);
t = arena_push_no_zero(G.textures_arena, struct dx11_texture);
}
sys_mutex_unlock(&lock);
}
@ -1167,7 +1167,7 @@ INTERNAL struct dx11_buffer *dx11_buffer_alloc(struct D3D11_BUFFER_DESC desc, D3
__prof;
struct dx11_buffer *buffer = NULL;
{
struct arena cpu_buffer_arena = ZI;
struct arena *cpu_buffer_arena = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(&G.buffers_mutex);
if (G.first_free_buffer) {
@ -1175,18 +1175,18 @@ INTERNAL struct dx11_buffer *dx11_buffer_alloc(struct D3D11_BUFFER_DESC desc, D3
G.first_free_buffer = buffer->next_free;
cpu_buffer_arena = buffer->cpu_buffer_arena;
} else {
buffer = arena_push_no_zero(&G.buffers_arena, struct dx11_buffer);
buffer = arena_push_no_zero(G.buffers_arena, struct dx11_buffer);
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(buffer);
if (!cpu_buffer_arena.base) {
if (!cpu_buffer_arena) {
cpu_buffer_arena = arena_alloc(GIGABYTE(64));
}
buffer->cpu_buffer_arena = cpu_buffer_arena;
}
buffer->desc = desc;
buffer->cpu_buffer = arena_push_dry(&buffer->cpu_buffer_arena, u8);
buffer->cpu_buffer = arena_push_dry(buffer->cpu_buffer_arena, u8);
if (desc.BindFlags & D3D11_BIND_SHADER_RESOURCE) {
ASSERT(desc.StructureByteStride != 0); /* Must provide stride for shader resource buffers */
@ -1221,7 +1221,7 @@ INTERNAL void dx11_buffer_release(struct dx11_buffer *buffer)
INTERNAL void *dx11_buffer_push(struct dx11_buffer *buffer, u64 size)
{
void *data = arena_push_array_no_zero(&buffer->cpu_buffer_arena, u8, size);
void *data = arena_push_array_no_zero(buffer->cpu_buffer_arena, u8, size);
return data;
}
@ -1230,7 +1230,7 @@ INTERNAL void dx11_buffer_submit(struct dx11_buffer *buffer)
__prof;
/* Grow GPU buffer if necessary */
u64 cpu_data_size = buffer->cpu_buffer_arena.pos;
u64 cpu_data_size = buffer->cpu_buffer_arena->pos;
if (cpu_data_size > buffer->gpu_buffer_capacity) {
if (buffer->srv) {
@ -1276,7 +1276,7 @@ INTERNAL void dx11_buffer_submit(struct dx11_buffer *buffer)
}
/* Reset cpu data */
arena_reset(&buffer->cpu_buffer_arena);
arena_reset(buffer->cpu_buffer_arena);
}
/* ========================== *
@ -1288,8 +1288,8 @@ struct gpu_handle gpu_plan_alloc(void)
__prof;
struct dx11_plan *plan = NULL;
{
struct arena cpu_cmds_arena = ZI;
struct arena gpu_cmds_arena = ZI;
struct arena *cpu_cmds_arena = NULL;
struct arena *gpu_cmds_arena = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(&G.plans_mutex);
if (G.first_free_plan) {
@ -1298,21 +1298,21 @@ struct gpu_handle gpu_plan_alloc(void)
cpu_cmds_arena = plan->cpu_cmds_arena;
gpu_cmds_arena = plan->gpu_cmds_arena;
} else {
plan = arena_push_no_zero(&G.plans_arena, struct dx11_plan);
plan = arena_push_no_zero(G.plans_arena, struct dx11_plan);
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(plan);
if (!cpu_cmds_arena.base) {
if (!cpu_cmds_arena) {
cpu_cmds_arena = arena_alloc(GIGABYTE(64));
}
if (!gpu_cmds_arena.base) {
if (!gpu_cmds_arena) {
gpu_cmds_arena = arena_alloc(GIGABYTE(64));
}
plan->cpu_cmds_arena = cpu_cmds_arena;
plan->gpu_cmds_arena = gpu_cmds_arena;
arena_reset(&plan->cpu_cmds_arena);
arena_reset(&plan->gpu_cmds_arena);
arena_reset(plan->cpu_cmds_arena);
arena_reset(plan->gpu_cmds_arena);
}
plan->header.kind = DX11_HANDLE_KIND_PLAN;
@ -1393,10 +1393,10 @@ void gpu_push_cmd(struct gpu_handle gpu_plan, struct gpu_cmd_params params)
/* Start new cmd */
if (!cmd) {
/* TODO: Better count method */
cmd = arena_push(&plan->cpu_cmds_arena, struct dx11_cmd);
cmd = arena_push(plan->cpu_cmds_arena, struct dx11_cmd);
cmd->kind = params.kind;
cmd->mesh.vertex_offset = (plan->cmd_buffers.mesh.vertex_buffer->cpu_buffer_arena.pos / sizeof(struct dx11_mesh_vertex));
cmd->mesh.index_offset = (plan->cmd_buffers.mesh.index_buffer->cpu_buffer_arena.pos / sizeof(u32));
cmd->mesh.vertex_offset = (plan->cmd_buffers.mesh.vertex_buffer->cpu_buffer_arena->pos / sizeof(struct dx11_mesh_vertex));
cmd->mesh.index_offset = (plan->cmd_buffers.mesh.index_buffer->cpu_buffer_arena->pos / sizeof(u32));
if (plan->cpu_last_cmd) {
plan->cpu_last_cmd->next = cmd;
} else {
@ -1439,11 +1439,11 @@ void gpu_push_cmd(struct gpu_handle gpu_plan, struct gpu_cmd_params params)
/* Start new cmd */
if (!cmd) {
/* TODO: Better count method */
cmd = arena_push(&plan->cpu_cmds_arena, struct dx11_cmd);
cmd = arena_push(plan->cpu_cmds_arena, struct dx11_cmd);
cmd->kind = params.kind;
cmd->texture.sprite = params.texture.sprite;
cmd->texture.texture = params.texture.texture;
cmd->texture.instance_offset = (plan->cmd_buffers.texture.instance_buffer->cpu_buffer_arena.pos / sizeof(struct dx11_texture_instance));
cmd->texture.instance_offset = (plan->cmd_buffers.texture.instance_buffer->cpu_buffer_arena->pos / sizeof(struct dx11_texture_instance));
if (plan->cpu_last_cmd) {
plan->cpu_last_cmd->next = cmd;
} else {
@ -1473,9 +1473,9 @@ void gpu_push_cmd(struct gpu_handle gpu_plan, struct gpu_cmd_params params)
/* Start new cmd */
if (!cmd) {
/* TODO: Better count method */
cmd = arena_push(&plan->cpu_cmds_arena, struct dx11_cmd);
cmd = arena_push(plan->cpu_cmds_arena, struct dx11_cmd);
cmd->kind = params.kind;
cmd->grid.instance_offset = (plan->cmd_buffers.grid.instance_buffer->cpu_buffer_arena.pos / sizeof(struct dx11_grid_instance));
cmd->grid.instance_offset = (plan->cmd_buffers.grid.instance_buffer->cpu_buffer_arena->pos / sizeof(struct dx11_grid_instance));
if (plan->cpu_last_cmd) {
plan->cpu_last_cmd->next = cmd;
} else {
@ -1500,9 +1500,9 @@ void gpu_push_cmd(struct gpu_handle gpu_plan, struct gpu_cmd_params params)
case GPU_CMD_KIND_TEST:
{
struct dx11_cmd *cmd = arena_push(&plan->cpu_cmds_arena, struct dx11_cmd);
struct dx11_cmd *cmd = arena_push(plan->cpu_cmds_arena, struct dx11_cmd);
cmd->kind = params.kind;
cmd->test.instance_offset = (plan->cmd_buffers.test.instance_buffer->cpu_buffer_arena.pos / sizeof(struct dx11_test_instance));
cmd->test.instance_offset = (plan->cmd_buffers.test.instance_buffer->cpu_buffer_arena->pos / sizeof(struct dx11_test_instance));
if (plan->cpu_last_cmd) {
plan->cpu_last_cmd->next = cmd;
} else {
@ -1596,7 +1596,7 @@ void gpu_dispatch(struct gpu_dispatch_params params)
struct dx11_plan *plan = (struct dx11_plan *)params.plan.v;
/* Swap cmd lists */
struct arena swp_arena = plan->gpu_cmds_arena;
struct arena *swp_arena = plan->gpu_cmds_arena;
plan->gpu_cmds_arena = plan->cpu_cmds_arena;
plan->gpu_first_cmd = plan->cpu_first_cmd;
plan->gpu_last_cmd = plan->cpu_last_cmd;
@ -1605,7 +1605,7 @@ void gpu_dispatch(struct gpu_dispatch_params params)
plan->cpu_cmds_arena = swp_arena;
plan->cpu_first_cmd = NULL;
plan->cpu_last_cmd = NULL;
arena_reset(&plan->cpu_cmds_arena);
arena_reset(plan->cpu_cmds_arena);
/* Submit cmd data */
{
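
With arenas held by pointer, the pool-reuse pattern in `dx11_buffer_alloc` (and the plan/texture pools above) gets simpler: the long-lived arena pointer is saved off before the pooled struct is zeroed, and a plain NULL test replaces the old `cpu_buffer_arena.base` check. A condensed sketch of that pattern, with the mutex locking and error handling omitted:

struct arena *kept = NULL;
if (G.first_free_buffer) {
    buffer = G.first_free_buffer;
    G.first_free_buffer = buffer->next_free;
    kept = buffer->cpu_buffer_arena; /* survives the MEMZERO below */
} else {
    buffer = arena_push_no_zero(G.buffers_arena, struct dx11_buffer);
}
MEMZERO_STRUCT(buffer);
if (!kept) { /* was: if (!cpu_buffer_arena.base) */
    kept = arena_alloc(GIGABYTE(64));
}
buffer->cpu_buffer_arena = kept;

The same property makes the cmd-list swap in `gpu_dispatch` a cheap pointer exchange rather than a struct copy.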

View File

@ -81,7 +81,7 @@ struct pipeline_error {
};
struct command_queue {
struct arena arena;
struct arena *arena;
struct sys_mutex mutex;
ID3D12CommandQueue *cq;
};
@ -115,7 +115,7 @@ struct dx12_resource {
struct cpu_descriptor_heap {
enum D3D12_DESCRIPTOR_HEAP_TYPE type;
struct arena arena;
struct arena *arena;
struct sys_mutex mutex;
u32 descriptor_size;
@ -166,19 +166,19 @@ struct handle_entry {
GLOBAL struct {
/* Handles pool */
struct sys_mutex handle_entries_mutex;
struct arena handle_entries_arena;
struct arena *handle_entries_arena;
struct handle_entry *first_free_handle_entry;
u64 num_handle_entries_reserved;
/* Descriptor heaps pool */
struct sys_mutex gpu_descriptor_heaps_mutex;
struct arena gpu_descriptor_heaps_arena;
struct arena *gpu_descriptor_heaps_arena;
struct gpu_descriptor_heap *first_free_gpu_descriptor_heap;
struct gpu_descriptor_heap *last_free_gpu_descriptor_heap;
/* Resources pool */
struct sys_mutex resources_mutex;
struct arena resources_arena;
struct arena *resources_arena;
struct dx12_resource *first_free_resource;
@ -376,7 +376,7 @@ INTERNAL struct gpu_handle handle_alloc(enum handle_kind kind, void *data)
old_gen = entry->gen;
idx = entry->idx;
} else {
entry = arena_push_no_zero(&G.handle_entries_arena, struct handle_entry);
entry = arena_push_no_zero(G.handle_entries_arena, struct handle_entry);
idx = G.num_handle_entries_reserved++;
}
sys_mutex_unlock(&lock);
@ -398,7 +398,7 @@ INTERNAL struct handle_entry *handle_get_entry(struct gpu_handle handle, struct
sys_assert_locked_e_or_s(lock, &G.handle_entries_mutex);
struct handle_entry *res = NULL;
if (handle.idx > 0 && handle.idx < G.num_handle_entries_reserved) {
struct handle_entry *tmp = &((struct handle_entry *)G.handle_entries_arena.base)[handle.idx];
struct handle_entry *tmp = &((struct handle_entry *)G.handle_entries_arena->base)[handle.idx];
if (tmp->gen == handle.gen) {
res = tmp;
}
@ -636,7 +636,7 @@ INTERNAL void dx12_init_swapchain(struct sys_window *window)
/* Create swapchain1 */
IDXGISwapChain1 *swapchain1 = NULL;
hr = IDXGIFactory2_CreateSwapChainForHwnd(G.factory, (IUnknown *)G.cq_direct, hwnd, &desc, NULL, NULL, &swapchain1);
hr = IDXGIFactory2_CreateSwapChainForHwnd(G.factory, (IUnknown *)G.cq_direct->cq, hwnd, &desc, NULL, NULL, &swapchain1);
if (FAILED(hr)) {
dx12_init_error(LIT("Failed to create IDXGISwapChain1"));
}
@ -1189,8 +1189,8 @@ INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE
{
struct command_queue *cq = NULL;
{
struct arena arena = arena_alloc(GIGABYTE(64));
cq = arena_push(&arena, struct command_queue);
struct arena *arena = arena_alloc(GIGABYTE(64));
cq = arena_push(arena, struct command_queue);
cq->arena = arena;
}
@ -1200,7 +1200,7 @@ INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE
desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
desc.Type = type;
desc.Priority = priority;
HRESULT hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&G.cq_copy_critical);
HRESULT hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&cq->cq);
if (FAILED(hr)) {
dx12_init_error(LIT("Failed to create command queue"));
}
@ -1223,8 +1223,8 @@ INTERNAL struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRI
{
struct cpu_descriptor_heap *dh = NULL;
{
struct arena arena = arena_alloc(MEGABYTE(64));
dh = arena_push(&arena, struct cpu_descriptor_heap);
struct arena *arena = arena_alloc(MEGABYTE(64));
dh = arena_push(arena, struct cpu_descriptor_heap);
dh->arena = arena;
}
dh->mutex = sys_mutex_alloc();
@ -1280,7 +1280,7 @@ INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
sys_panic(LIT("Max descriptors reached in heap"));
}
d = arena_push_no_zero(&dh->arena, struct descriptor);
d = arena_push_no_zero(dh->arena, struct descriptor);
handle.ptr = dh->handle.ptr + (dh->num_descriptors_reserved * dh->descriptor_size);
++dh->num_descriptors_reserved;
}
@ -1340,7 +1340,7 @@ INTERNAL struct gpu_descriptor_heap *gpu_descriptor_heap_alloc(struct cpu_descri
}
} else {
/* No heap available for reuse, allocate a new one */
dh_gpu = arena_push_no_zero(&G.gpu_descriptor_heaps_arena, struct gpu_descriptor_heap);
dh_gpu = arena_push_no_zero(G.gpu_descriptor_heaps_arena, struct gpu_descriptor_heap);
}
sys_mutex_unlock(&lock);
}
@ -1410,7 +1410,7 @@ struct dx12_buffer {
/* TODO: Move command list out of plan struct */
struct plan {
struct arena arena;
struct arena *arena;
ID3D12CommandAllocator *ca_direct;
ID3D12GraphicsCommandList *cl_direct;
@ -1424,8 +1424,8 @@ INTERNAL struct plan *plan_alloc(void)
HRESULT hr = 0;
struct plan *plan = NULL;
{
struct arena arena = arena_alloc(MEGABYTE(64));
plan = arena_push(&arena, struct plan);
struct arena *arena = arena_alloc(MEGABYTE(64));
plan = arena_push(arena, struct plan);
plan->arena = arena;
}
@ -1480,7 +1480,7 @@ INTERNAL struct dx12_resource *dx12_resource_alloc(D3D12_HEAP_PROPERTIES heap_pr
r = G.first_free_resource;
G.first_free_resource = r->next_free;
} else {
r = arena_push_no_zero(&G.resources_arena, struct dx12_resource);
r = arena_push_no_zero(G.resources_arena, struct dx12_resource);
}
sys_mutex_unlock(&lock);
}
@ -1668,7 +1668,8 @@ void gpu_dispatch(struct gpu_dispatch_params params)
struct gpu_descriptor_heap *temp_descriptor_heap = gpu_descriptor_heap_alloc(G.cbv_srv_uav_heap);
/* Material pass */
if (plan->material_instances->count > 0) {
//if (plan->material_instances->count > 0) {
if ((false)) {
//struct pipeline *pipeline = dx12_get_pipeline(pipeline_scope, LIT("material"));
struct pipeline *pipeline = &G.test_pipeline;

View File

@ -112,7 +112,7 @@ struct host_rcv_packet {
};
struct host_rcv_buffer {
struct arena arena;
struct arena *arena;
struct host_rcv_packet *first_packet;
struct host_rcv_packet *last_packet;
};
@ -177,25 +177,25 @@ struct host_startup_receipt host_startup(struct sock_startup_receipt *sock_sr)
struct host *host_alloc(u16 listen_port)
{
struct arena arena = arena_alloc(GIGABYTE(64));
struct host *host = arena_push(&arena, struct host);
struct arena *arena = arena_alloc(GIGABYTE(64));
struct host *host = arena_push(arena, struct host);
host->arena = arena;
host->cmd_arena = arena_alloc(GIGABYTE(64));
host->channel_arena = arena_alloc(GIGABYTE(64));
host->rcv_buffer_read = arena_push(&host->arena, struct host_rcv_buffer);
host->rcv_buffer_write = arena_push(&host->arena, struct host_rcv_buffer);
host->rcv_buffer_read = arena_push(host->arena, struct host_rcv_buffer);
host->rcv_buffer_write = arena_push(host->arena, struct host_rcv_buffer);
host->rcv_buffer_read->arena = arena_alloc(GIGABYTE(64));
host->rcv_buffer_write->arena = arena_alloc(GIGABYTE(64));
host->buddy = buddy_ctx_alloc(GIGABYTE(64));
host->channels = arena_push_dry(&host->channel_arena, struct host_channel);
host->channels = arena_push_dry(host->channel_arena, struct host_channel);
host->num_channel_lookup_bins = NUM_CHANNEL_LOOKUP_BINS;
host->channel_lookup_bins = arena_push_array(&host->arena, struct host_channel_lookup_bin, host->num_channel_lookup_bins);
host->channel_lookup_bins = arena_push_array(host->arena, struct host_channel_lookup_bin, host->num_channel_lookup_bins);
host->num_msg_assembler_lookup_bins = NUM_MSG_ASSEMBLER_LOOKUP_BINS;
host->msg_assembler_lookup_bins = arena_push_array(&host->arena, struct host_msg_assembler_lookup_bin, host->num_msg_assembler_lookup_bins);
host->msg_assembler_lookup_bins = arena_push_array(host->arena, struct host_msg_assembler_lookup_bin, host->num_msg_assembler_lookup_bins);
host->sock = sock_alloc(listen_port, MEGABYTE(2), MEGABYTE(2));
@ -217,11 +217,11 @@ void host_release(struct host *host)
sock_release(host->sock);
buddy_ctx_release(host->buddy);
arena_release(&host->rcv_buffer_write->arena);
arena_release(&host->rcv_buffer_read->arena);
arena_release(&host->channel_arena);
arena_release(&host->cmd_arena);
arena_release(&host->arena);
arena_release(host->rcv_buffer_write->arena);
arena_release(host->rcv_buffer_read->arena);
arena_release(host->channel_arena);
arena_release(host->cmd_arena);
arena_release(host->arena);
}
/* ========================== *
@ -296,7 +296,7 @@ INTERNAL struct host_channel *host_channel_alloc(struct host *host, struct sock_
id = channel->id;
++id.gen;
} else {
channel = arena_push_no_zero(&host->channel_arena, struct host_channel);
channel = arena_push_no_zero(host->channel_arena, struct host_channel);
id.gen = 1;
id.idx = host->num_channels_reserved;
++host->num_channels_reserved;
@ -398,7 +398,7 @@ INTERNAL struct host_msg_assembler *host_msg_assembler_alloc(struct host_channel
ma = host->first_free_msg_assembler;
host->first_free_msg_assembler = ma->next_free;
} else {
ma = arena_push_no_zero(&host->arena, struct host_msg_assembler);
ma = arena_push_no_zero(host->arena, struct host_msg_assembler);
}
MEMZERO_STRUCT(ma);
ma->channel = channel;
@ -553,7 +553,7 @@ INTERNAL struct host_snd_packet *host_channel_snd_packet_alloc(struct host_chann
packet = host->first_free_packet;
host->first_free_packet = packet->next;
} else {
packet = arena_push_no_zero(&host->arena, struct host_snd_packet);
packet = arena_push_no_zero(host->arena, struct host_snd_packet);
}
MEMZERO_STRUCT(packet);
@ -584,7 +584,7 @@ INTERNAL struct host_snd_packet *host_channel_snd_packet_alloc(struct host_chann
INTERNAL struct host_cmd *host_cmd_alloc_and_append(struct host *host)
{
struct host_cmd *cmd = arena_push(&host->cmd_arena, struct host_cmd);
struct host_cmd *cmd = arena_push(host->cmd_arena, struct host_cmd);
if (host->last_cmd) {
host->last_cmd->next = cmd;
} else {
@ -614,7 +614,7 @@ void host_queue_write(struct host *host, struct host_channel_id channel_id, stru
struct host_cmd *cmd = host_cmd_alloc_and_append(host);
cmd->kind = HOST_CMD_KIND_WRITE;
cmd->channel_id = channel_id;
cmd->write_msg = string_copy(&host->cmd_arena, msg);
cmd->write_msg = string_copy(host->cmd_arena, msg);
cmd->write_reliable = flags & HOST_WRITE_FLAG_RELIABLE;
}
@ -824,7 +824,7 @@ struct host_event_list host_update_begin(struct arena *arena, struct host *host)
/* Reset read buffer */
rcv_buffer->first_packet = NULL;
rcv_buffer->last_packet = NULL;
arena_reset(&rcv_buffer->arena);
arena_reset(rcv_buffer->arena);
}
/* Update channels */
@ -1052,7 +1052,7 @@ void host_update_end(struct host *host)
/* Reset cmds */
host->first_cmd = NULL;
host->last_cmd = NULL;
arena_reset(&host->cmd_arena);
arena_reset(host->cmd_arena);
scratch_end(scratch);
}
@ -1064,10 +1064,10 @@ void host_update_end(struct host *host)
INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(host_receiver_thread_entry_point, arg)
{
u64 read_buff_size = KILOBYTE(64);
struct arena read_buff_arena = arena_alloc(read_buff_size);
struct arena *read_buff_arena = arena_alloc(read_buff_size);
struct string read_buff = ZI;
read_buff.len = read_buff_size;
read_buff.text = arena_push_array_no_zero(&read_buff_arena, u8, KILOBYTE(64));
read_buff.text = arena_push_array_no_zero(read_buff_arena, u8, KILOBYTE(64));
struct host *host = (struct host *)arg;
@ -1086,9 +1086,9 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(host_receiver_thread_entry_point, arg)
struct sys_lock lock = sys_mutex_lock_e(&host->rcv_buffer_write_mutex);
{
struct host_rcv_buffer *rcv_buffer = host->rcv_buffer_write;
struct host_rcv_packet *packet = arena_push(&rcv_buffer->arena, struct host_rcv_packet);
struct host_rcv_packet *packet = arena_push(rcv_buffer->arena, struct host_rcv_packet);
packet->address = address;
packet->data = string_copy(&rcv_buffer->arena, data);
packet->data = string_copy(rcv_buffer->arena, data);
if (rcv_buffer->last_packet) {
rcv_buffer->last_packet->next = packet;
} else {
@ -1101,5 +1101,5 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(host_receiver_thread_entry_point, arg)
}
}
arena_release(&read_buff_arena);
arena_release(read_buff_arena);
}

View File

@ -64,18 +64,18 @@ struct host_event_list {
};
struct host {
struct arena arena;
struct arena *arena;
struct sock *sock;
struct buddy_ctx *buddy; /* For storing msg assembler data */
struct arena cmd_arena;
struct arena *cmd_arena;
struct host_cmd *first_cmd;
struct host_cmd *last_cmd;
struct host_cmd *first_free_cmd;
struct arena channel_arena;
struct arena *channel_arena;
struct host_channel *channels;
struct host_channel *first_free_channel;
u64 num_channels_reserved;

View File

@ -17,7 +17,7 @@ GLOBAL struct {
struct atomic_i32 initialized;
struct sys_mutex callbacks_mutex;
struct arena callbacks_arena;
struct arena *callbacks_arena;
struct log_event_callback *first_callback;
struct log_event_callback *last_callback;
@ -86,7 +86,7 @@ void log_register_callback(log_event_callback_func *func, i32 level)
if (!atomic_i32_eval(&G.initialized)) { return; }
struct sys_lock lock = sys_mutex_lock_e(&G.callbacks_mutex);
{
struct log_event_callback *callback = arena_push(&G.callbacks_arena, struct log_event_callback);
struct log_event_callback *callback = arena_push(G.callbacks_arena, struct log_event_callback);
callback->func = func;
callback->level = level;
if (G.last_callback) {

View File

@ -59,7 +59,7 @@ GLOBAL struct {
struct v2 listener_dir;
/* Track list */
struct arena track_arena;
struct arena *track_arena;
struct track *track_first_playing;
struct track *track_last_playing;
u64 track_playing_count;
@ -119,7 +119,7 @@ INTERNAL struct track *track_alloc_locked(struct sys_lock *lock, struct sound *s
*track = (struct track) { .gen = track->gen + 1 };
} else {
/* Allocate new */
track = arena_push(&G.track_arena, struct track);
track = arena_push(G.track_arena, struct track);
track->gen = 1;
}

View File

@ -16,7 +16,7 @@
/* Add resource data to binary */
GLOBAL struct {
struct arena arena;
struct arena *arena;
#if RESOURCES_EMBEDDED
struct tar_archive archive;
@ -30,7 +30,7 @@ GLOBAL struct {
struct atomic_i32 watch_shutdown;
struct sys_mutex watch_dispatcher_mutex;
struct arena watch_dispatcher_info_arena;
struct arena *watch_dispatcher_info_arena;
struct sys_watch_info_list watch_dispatcher_info_list;
struct sys_condition_variable watch_dispatcher_cv;
@ -193,7 +193,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_monitor_thread_entry_poi
if (res.first && !atomic_i32_eval(&G.watch_shutdown)) {
struct sys_lock lock = sys_mutex_lock_e(&G.watch_dispatcher_mutex);
{
struct sys_watch_info_list list_part = sys_watch_info_copy(&G.watch_dispatcher_info_arena, res);
struct sys_watch_info_list list_part = sys_watch_info_copy(G.watch_dispatcher_info_arena, res);
if (G.watch_dispatcher_info_list.last) {
G.watch_dispatcher_info_list.last->next = list_part.first;
list_part.first->prev = G.watch_dispatcher_info_list.last;
@ -227,7 +227,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_dispatcher_thread_entry_
struct sys_lock watch_dispatcher_lock = sys_mutex_lock_e(&G.watch_dispatcher_mutex);
while (!atomic_i32_eval(&G.watch_shutdown)) {
sys_condition_variable_wait(&G.watch_dispatcher_cv, &watch_dispatcher_lock);
if (!atomic_i32_eval(&G.watch_shutdown) && G.watch_dispatcher_info_arena.pos > 0) {
if (!atomic_i32_eval(&G.watch_shutdown) && G.watch_dispatcher_info_arena->pos > 0) {
/* Unlock and sleep a bit so duplicate events pile up */
{
sys_mutex_unlock(&watch_dispatcher_lock);
@ -240,7 +240,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_dispatcher_thread_entry_
/* Pull watch info from queue */
struct sys_watch_info_list watch_info_list = sys_watch_info_copy(temp.arena, G.watch_dispatcher_info_list);
MEMZERO_STRUCT(&G.watch_dispatcher_info_list);
arena_reset(&G.watch_dispatcher_info_arena);
arena_reset(G.watch_dispatcher_info_arena);
/* Unlock and run callbacks */
sys_mutex_unlock(&watch_dispatcher_lock);

View File

@ -22,7 +22,7 @@ INTERNAL THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(scratch_context_release, vctx)
#endif
for (u32 i = 0; i < ARRAY_COUNT(ctx->arenas); ++i) {
arena_release(&ctx->arenas[i]);
arena_release(ctx->arenas[i]);
}
}

View File

@ -13,7 +13,7 @@
* ========================== */
struct scratch_ctx {
struct arena arenas[SCRATCH_ARENAS_PER_THREAD];
struct arena *arenas[SCRATCH_ARENAS_PER_THREAD];
#if RTC
u64 next_scratch_id;
@ -61,9 +61,9 @@ INLINE struct arena_temp _scratch_begin(struct arena *potential_conflict)
ASSERT(potential_conflict != NULL);
struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_eval(&tl_scratch_ctx);
struct arena *scratch_arena = &ctx->arenas[0];
struct arena *scratch_arena = ctx->arenas[0];
if (potential_conflict && scratch_arena->base == potential_conflict->base) {
scratch_arena = &ctx->arenas[1];
scratch_arena = ctx->arenas[1];
}
struct arena_temp temp = arena_temp_begin(scratch_arena);
scratch_dbg_push(ctx, &temp);
@ -85,7 +85,7 @@ INLINE struct arena_temp _scratch_begin(struct arena *potential_conflict)
INLINE struct arena_temp _scratch_begin_no_conflict(void)
{
struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_eval(&tl_scratch_ctx);
struct arena *scratch_arena = &ctx->arenas[0];
struct arena *scratch_arena = ctx->arenas[0];
struct arena_temp temp = arena_temp_begin(scratch_arena);
scratch_dbg_push(ctx, &temp);
return temp;

View File

@ -41,7 +41,7 @@
* ========================== */
GLOBAL struct {
struct arena nil_arena;
struct arena *nil_arena;
struct sim_client_store *nil_client_store;
struct sim_client *nil_client;
struct sim_snapshot *nil_snapshot;
@ -65,21 +65,21 @@ struct sim_startup_receipt sim_startup(void)
G.nil_arena = arena_alloc(GIGABYTE(1));
/* Nil client store */
G.nil_client_store = arena_push(&G.nil_arena, struct sim_client_store);
G.nil_client_store = arena_push(G.nil_arena, struct sim_client_store);
G.nil_client_store->valid = false;
/* Nil client */
G.nil_client = arena_push(&G.nil_arena, struct sim_client);
G.nil_client = arena_push(G.nil_arena, struct sim_client);
G.nil_client->valid = false;
G.nil_client->store = sim_client_store_nil();
/* Nil snapshot */
G.nil_snapshot = arena_push(&G.nil_arena, struct sim_snapshot);
G.nil_snapshot = arena_push(G.nil_arena, struct sim_snapshot);
G.nil_snapshot->valid = false;
G.nil_snapshot->client = sim_client_nil();
/* Nil ent */
G.nil_ent = arena_push(&G.nil_arena, struct sim_ent);
G.nil_ent = arena_push(G.nil_arena, struct sim_ent);
G.nil_ent->ss = sim_snapshot_nil();
G.nil_ent->valid = false;
G.nil_ent->id = SIM_ENT_NIL_ID;
@ -93,7 +93,7 @@ struct sim_startup_receipt sim_startup(void)
G.nil_ent->sprite_tint = COLOR_WHITE;
/* Lock nil arena */
arena_set_readonly(&G.nil_arena);
arena_set_readonly(G.nil_arena);
return (struct sim_startup_receipt) { 0 };
}
@ -106,15 +106,15 @@ struct sim_client_store *sim_client_store_alloc(void)
__prof;
struct sim_client_store *store;
{
struct arena arena = arena_alloc(GIGABYTE(64));
store = arena_push(&arena, struct sim_client_store);
struct arena *arena = arena_alloc(GIGABYTE(64));
store = arena_push(arena, struct sim_client_store);
store->arena = arena;
}
store->valid = true;
store->num_client_lookup_bins = CLIENT_LOOKUP_BINS;
store->client_lookup_bins = arena_push_array(&store->arena, struct sim_client_lookup_bin, store->num_client_lookup_bins);
store->client_lookup_bins = arena_push_array(store->arena, struct sim_client_lookup_bin, store->num_client_lookup_bins);
store->clients_arena = arena_alloc(GIGABYTE(64));
store->clients = arena_push_dry(&store->clients_arena, struct sim_client);
store->clients = arena_push_dry(store->clients_arena, struct sim_client);
return store;
}
@ -127,8 +127,8 @@ void sim_client_store_release(struct sim_client_store *store)
sim_client_release(client);
}
}
arena_release(&store->clients_arena);
arena_release(&store->arena);
arena_release(store->clients_arena);
arena_release(store->arena);
}
/* ========================== *
@ -145,7 +145,7 @@ struct sim_client *sim_client_alloc(struct sim_client_store *store)
handle = client->handle;
++handle.gen;
} else {
client = arena_push_no_zero(&store->clients_arena, struct sim_client);
client = arena_push_no_zero(store->clients_arena, struct sim_client);
handle.gen = 1;
handle.idx = store->num_clients_reserved;
++store->num_clients_reserved;
@ -158,7 +158,7 @@ struct sim_client *sim_client_alloc(struct sim_client_store *store)
client->snapshots_arena = arena_alloc(GIGABYTE(8));
client->num_snapshot_lookup_bins = TICK_LOOKUP_BINS;
client->snapshot_lookup_bins = arena_push_array(&client->snapshots_arena, struct sim_snapshot_lookup_bin, client->num_snapshot_lookup_bins);
client->snapshot_lookup_bins = arena_push_array(client->snapshots_arena, struct sim_snapshot_lookup_bin, client->num_snapshot_lookup_bins);
return client;
}
@ -171,8 +171,8 @@ void sim_client_release(struct sim_client *client)
struct sim_snapshot *ss = bin->first;
while (ss) {
struct sim_snapshot *next = ss->next_in_bin;
arena_release(&ss->ents_arena);
arena_release(&ss->arena);
arena_release(ss->ents_arena);
arena_release(ss->arena);
ss = next;
}
}
@ -187,7 +187,7 @@ void sim_client_release(struct sim_client *client)
store->first_free_client = client->handle;
--store->num_clients_allocated;
++client->handle.gen;
arena_release(&client->snapshots_arena);
arena_release(client->snapshots_arena);
}
/* ========================== *
@ -282,8 +282,8 @@ struct sim_snapshot *sim_snapshot_alloc(struct sim_client *client, struct sim_sn
struct sim_snapshot *ss;
{
struct arena arena = ZI;
struct arena ents_arena = ZI;
struct arena *arena = NULL;
struct arena *ents_arena = NULL;
{
ss = client->first_free_snapshot;
if (ss) {
@ -297,12 +297,12 @@ struct sim_snapshot *sim_snapshot_alloc(struct sim_client *client, struct sim_sn
ents_arena = arena_alloc(GIGABYTE(1));
}
}
arena_reset(&arena);
ss = arena_push(&arena, struct sim_snapshot);
arena_reset(arena);
ss = arena_push(arena, struct sim_snapshot);
ss->arena = arena;
ss->ents_arena = ents_arena;
arena_reset(&ss->ents_arena);
arena_reset(ss->ents_arena);
}
ss->tick = tick;
@ -319,7 +319,7 @@ struct sim_snapshot *sim_snapshot_alloc(struct sim_client *client, struct sim_sn
/* Copy id lookup bins */
ss->num_id_bins = src->num_id_bins > 0 ? src->num_id_bins : ID_LOOKUP_BINS;
ss->id_bins = arena_push_array_no_zero(&ss->arena, struct sim_ent_bin, ss->num_id_bins);
ss->id_bins = arena_push_array_no_zero(ss->arena, struct sim_ent_bin, ss->num_id_bins);
if (src->num_id_bins > 0) {
for (u64 i = 0; i < src->num_id_bins; ++i) {
ss->id_bins[i] = src->id_bins[i];
@ -332,20 +332,20 @@ struct sim_snapshot *sim_snapshot_alloc(struct sim_client *client, struct sim_sn
ss->first_free_ent = src->first_free_ent;
ss->num_ents_allocated = src->num_ents_allocated;
ss->num_ents_reserved = src->num_ents_reserved;
ss->ents = arena_push_array_no_zero(&ss->ents_arena, struct sim_ent, ss->num_ents_reserved);
ss->ents = arena_push_array_no_zero(ss->ents_arena, struct sim_ent, ss->num_ents_reserved);
if (ss->num_ents_reserved == 0) {
/* Copying from nil snapshot, need to create blank & root entity */
/* Push blank ent at index 0 (because index 0 is never valid anyway since it maps to sim_ent_nil()) */
{
arena_push(&ss->ents_arena, struct sim_ent);
arena_push(ss->ents_arena, struct sim_ent);
++ss->num_ents_allocated;
++ss->num_ents_reserved;
}
/* Push root ent with constant id */
{
struct sim_ent *root = arena_push_no_zero(&ss->ents_arena, struct sim_ent);
struct sim_ent *root = arena_push_no_zero(ss->ents_arena, struct sim_ent);
*root = *sim_ent_nil();
root->ss = ss;
root->valid = true;
@ -845,7 +845,7 @@ void sim_snapshot_decode(struct bitbuff_reader *br, struct sim_snapshot *ss)
ss->num_ents_reserved = br_read_uv(br);
i64 reserve_diff = (i64)ss->num_ents_reserved - (i64)old_num_ents_reserved;
if (reserve_diff > 0) {
arena_push_array_no_zero(&ss->ents_arena, struct sim_ent, reserve_diff);
arena_push_array_no_zero(ss->ents_arena, struct sim_ent, reserve_diff);
for (u64 i = old_num_ents_reserved; i < ss->num_ents_reserved; ++i) {
struct sim_ent *e = &ss->ents[i];
*e = *sim_ent_nil();
@ -966,7 +966,7 @@ void sim_snapshot_decode(struct bitbuff_reader *br, struct sim_snapshot *ss)
ss->num_ents_reserved = br_read_uv(br);
i64 reserve_diff = (i64)ss->num_ents_reserved - (i64)old_num_ents_reserved;
if (reserve_diff > 0) {
arena_push_array_no_zero(&ss->ents_arena, struct sim_ent, reserve_diff);
arena_push_array_no_zero(ss->ents_arena, struct sim_ent, reserve_diff);
for (u64 i = old_num_ents_reserved; i < ss->num_ents_reserved; ++i) {
struct sim_ent *e = &ss->ents[i];
*e = *sim_ent_nil();
@ -1063,7 +1063,7 @@ void sim_snapshot_decode(struct bitbuff_reader *br, struct sim_snapshot *ss)
ss->num_ents_reserved = br_read_uv(br);
i64 reserve_diff = (i64)ss->num_ents_reserved - (i64)old_num_ents_reserved;
if (reserve_diff > 0) {
arena_push_array_no_zero(&ss->ents_arena, struct sim_ent, reserve_diff);
arena_push_array_no_zero(ss->ents_arena, struct sim_ent, reserve_diff);
for (u64 i = old_num_ents_reserved; i < ss->num_ents_reserved; ++i) {
struct sim_ent *e = &ss->ents[i];
*e = *sim_ent_nil();

View File

@ -32,14 +32,14 @@ struct sim_client_lookup_bin {
struct sim_client_store {
b32 valid;
struct arena arena;
struct arena *arena;
/* Client lookup */
struct sim_client_lookup_bin *client_lookup_bins;
u64 num_client_lookup_bins;
/* Clients */
struct arena clients_arena;
struct arena *clients_arena;
struct sim_client *clients;
struct sim_client_handle first_free_client;
u64 num_clients_allocated;
@ -71,7 +71,7 @@ struct sim_client {
struct sim_client_handle handle;
struct sim_client_store *store;
struct arena snapshots_arena;
struct arena *snapshots_arena;
/* Round trip time of the client (if networked) */
i64 last_rtt_ns;
@ -186,7 +186,7 @@ struct sim_snapshot {
u64 prev_tick;
u64 next_tick;
struct arena arena;
struct arena *arena;
/* Sim time (guaranteed to increase by sim_dt_ns each step) */
i64 sim_dt_ns;
@ -206,7 +206,7 @@ struct sim_snapshot {
u64 num_id_bins;
/* Entities */
struct arena ents_arena;
struct arena *ents_arena;
struct sim_ent *ents;
u32 first_free_ent;
u32 num_ents_allocated;

View File

@ -40,7 +40,7 @@ struct sim_ent *sim_ent_alloc_raw(struct sim_snapshot *ss, struct sim_ent *paren
ss->first_free_ent = ent->next_free;
} else {
/* Make new */
ent = arena_push_no_zero(&ss->ents_arena, struct sim_ent);
ent = arena_push_no_zero(ss->ents_arena, struct sim_ent);
++ss->num_ents_reserved;
}
*ent = *sim_ent_nil();

View File

@ -41,7 +41,7 @@ struct win32_sock {
GLOBAL struct {
WSADATA wsa_data;
struct arena win32_socks_arena;
struct arena *win32_socks_arena;
struct sys_mutex win32_socks_mutex;
struct win32_sock *first_free_win32_sock;
} G = ZI, DEBUG_ALIAS(G, G_sock_win32);
@ -303,7 +303,7 @@ INTERNAL struct win32_sock *win32_sock_alloc(void)
ws = G.first_free_win32_sock;
G.first_free_win32_sock = ws->next_free;
} else {
ws = arena_push_no_zero(&G.win32_socks_arena, struct win32_sock);
ws = arena_push_no_zero(G.win32_socks_arena, struct win32_sock);
}
sys_mutex_unlock(&lock);
}

View File

@ -19,7 +19,7 @@ struct sound_task_params {
struct sound_task_params_store {
struct sound_task_params *head_free;
struct arena arena;
struct arena *arena;
struct sys_mutex mutex;
};
@ -62,7 +62,7 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void)
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push(&G.params.arena, struct sound_task_params);
p = arena_push(G.params.arena, struct sound_task_params);
}
sys_mutex_unlock(&lock);
}

View File

@ -24,34 +24,34 @@ struct space *space_alloc(f32 cell_size, u32 num_bins_sqrt)
{
struct space *space;
{
struct arena arena = arena_alloc(GIGABYTE(64));
space = arena_push(&arena, struct space);
struct arena *arena = arena_alloc(GIGABYTE(64));
space = arena_push(arena, struct space);
space->entry_arena = arena;
}
space->valid = true;
space->entries = arena_push_dry(&space->entry_arena, struct space_entry);
space->entries = arena_push_dry(space->entry_arena, struct space_entry);
space->cell_arena = arena_alloc(GIGABYTE(64));
space->cell_size = cell_size;
space->num_bins = num_bins_sqrt * num_bins_sqrt;
space->num_bins_sqrt = num_bins_sqrt;
space->bins = arena_push_array(&space->cell_arena, struct space_cell_bin, space->num_bins);
space->bins = arena_push_array(space->cell_arena, struct space_cell_bin, space->num_bins);
return space;
}
void space_release(struct space *space)
{
arena_release(&space->cell_arena);
arena_release(&space->entry_arena);
arena_release(space->cell_arena);
arena_release(space->entry_arena);
}
void space_reset(struct space *space)
{
arena_pop_to(&space->entry_arena, (u64)space->entries - (u64)space->entry_arena.base);
arena_reset(&space->cell_arena);
space->bins = arena_push_array(&space->cell_arena, struct space_cell_bin, space->num_bins);
arena_pop_to(space->entry_arena, (u64)space->entries - (u64)space->entry_arena->base);
arena_reset(space->cell_arena);
space->bins = arena_push_array(space->cell_arena, struct space_cell_bin, space->num_bins);
space->num_entries_reserved = 0;
space->first_free_cell = NULL;
space->first_free_cell_node = NULL;
@ -140,7 +140,7 @@ INTERNAL void space_cell_node_alloc(struct v2i32 cell_pos, struct space_entry *e
cell = space->first_free_cell;
space->first_free_cell = cell->next_free;
} else {
cell = arena_push_no_zero(&space->cell_arena, struct space_cell);
cell = arena_push_no_zero(space->cell_arena, struct space_cell);
}
MEMZERO_STRUCT(cell);
if (bin->last_cell) {
@ -162,7 +162,7 @@ INTERNAL void space_cell_node_alloc(struct v2i32 cell_pos, struct space_entry *e
node = space->first_free_cell_node;
space->first_free_cell_node = node->next_free;
} else {
node = arena_push_no_zero(&space->cell_arena, struct space_cell_node);
node = arena_push_no_zero(space->cell_arena, struct space_cell_node);
}
MEMZERO_STRUCT(node);
}
@ -281,7 +281,7 @@ struct space_entry *space_entry_alloc(struct space *space, struct sim_ent_id ent
space->first_free_entry = entry->next_free;
handle = entry->handle;
} else {
entry = arena_push_no_zero(&space->entry_arena, struct space_entry);
entry = arena_push_no_zero(space->entry_arena, struct space_entry);
handle.idx = space->num_entries_reserved;
handle.gen = 1;
++space->num_entries_reserved;

View File

@ -56,14 +56,14 @@ struct space {
b32 valid;
f32 cell_size;
struct arena cell_arena;
struct arena *cell_arena;
struct space_cell_bin *bins;
i32 num_bins;
i32 num_bins_sqrt;
struct space_cell *first_free_cell;
struct space_cell_node *first_free_cell_node;
struct arena entry_arena;
struct arena *entry_arena;
u64 num_entries_reserved;
struct space_entry *entries;
struct space_entry *first_free_entry;

View File

@ -74,7 +74,7 @@ struct cache_entry {
/* NOTE: This data is finalized once entry state = loaded */
i64 load_time_ns;
u64 memory_usage;
struct arena arena;
struct arena *arena;
struct sprite_texture *texture;
struct sprite_sheet *sheet;
@ -98,7 +98,7 @@ struct cache_bin {
struct cache {
struct atomic_u64 memory_usage;
struct arena arena;
struct arena *arena;
struct cache_bin *bins;
struct sys_mutex entry_pool_mutex;
struct cache_entry *entry_pool_first_free;
@ -132,7 +132,7 @@ struct load_cmd {
* ========================== */
GLOBAL struct {
struct arena perm_arena;
struct arena *perm_arena;
struct sprite_texture *nil_texture;
struct sprite_texture *loading_texture;
struct sprite_sheet *nil_sheet;
@ -143,12 +143,12 @@ GLOBAL struct {
/* Load cmds */
struct sys_mutex load_cmds_mutex;
struct arena load_cmds_arena;
struct arena *load_cmds_arena;
struct load_cmd *first_free_load_cmd;
/* Scopes */
struct atomic_i32 scopes_lock;
struct arena scopes_arena;
struct arena *scopes_arena;
struct sprite_scope *first_free_scope;
/* Evictor thread */
@ -219,10 +219,10 @@ struct sprite_startup_receipt sprite_startup(struct gpu_startup_receipt *gpu_sr,
G.perm_arena = arena_alloc(MEGABYTE(1));
{
/* Init loading texture */
G.loading_texture = arena_push(&G.perm_arena, struct sprite_texture);
G.loading_texture = arena_push(G.perm_arena, struct sprite_texture);
/* Init nil texture */
G.nil_texture = arena_push(&G.perm_arena, struct sprite_texture);
G.nil_texture = arena_push(G.perm_arena, struct sprite_texture);
G.nil_texture->loaded = true;
{
struct arena_temp scratch = scratch_begin_no_conflict();
@ -232,21 +232,21 @@ struct sprite_startup_receipt sprite_startup(struct gpu_startup_receipt *gpu_sr,
}
/* Init loading sheet */
G.loading_sheet = arena_push(&G.perm_arena, struct sprite_sheet);
G.loading_sheet = arena_push(G.perm_arena, struct sprite_sheet);
G.loading_sheet->image_size = V2(IMAGE_PIXELS_PER_UNIT, IMAGE_PIXELS_PER_UNIT);
G.loading_sheet->frame_size = V2(IMAGE_PIXELS_PER_UNIT, IMAGE_PIXELS_PER_UNIT);
/* Init nil sheet */
G.nil_sheet = arena_push(&G.perm_arena, struct sprite_sheet);
G.nil_sheet = arena_push(G.perm_arena, struct sprite_sheet);
G.nil_sheet->image_size = V2(IMAGE_PIXELS_PER_UNIT, IMAGE_PIXELS_PER_UNIT);
G.nil_sheet->frame_size = V2(IMAGE_PIXELS_PER_UNIT, IMAGE_PIXELS_PER_UNIT);
G.nil_sheet->loaded = true;
}
arena_set_readonly(&G.perm_arena);
arena_set_readonly(G.perm_arena);
G.cache.entry_pool_mutex = sys_mutex_alloc();
G.cache.arena = arena_alloc(GIGABYTE(64));
G.cache.bins = arena_push_array(&G.cache.arena, struct cache_bin, CACHE_BINS_COUNT);
G.cache.bins = arena_push_array(G.cache.arena, struct cache_bin, CACHE_BINS_COUNT);
for (u64 i = 0; i < CACHE_BINS_COUNT; ++i) {
G.cache.bins[i].mutex = sys_mutex_alloc();
}
@ -321,7 +321,7 @@ INTERNAL void push_load_task(struct cache_ref ref, struct sprite_tag tag)
cmd = G.first_free_load_cmd;
G.first_free_load_cmd = cmd->next_free;
} else {
cmd = arena_push_no_zero(&G.load_cmds_arena, struct load_cmd);
cmd = arena_push_no_zero(G.load_cmds_arena, struct load_cmd);
}
sys_mutex_unlock(&lock);
}
@ -376,7 +376,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
if (decoded.success) {
/* Initialize */
e->texture = arena_push(&e->arena, struct sprite_texture);
e->texture = arena_push(e->arena, struct sprite_texture);
e->texture->width = decoded.image.width;
e->texture->height = decoded.image.height;
e->texture->texture = gpu_texture_alloc(GPU_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB, 0, V2I32(decoded.image.width, decoded.image.height), decoded.image.pixels);
@ -387,8 +387,8 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
success = true;
}
}
arena_set_readonly(&e->arena);
e->memory_usage = e->arena.committed + memory_size;
arena_set_readonly(e->arena);
e->memory_usage = e->arena->committed + memory_size;
atomic_u64_eval_add_u64(&G.cache.memory_usage, e->memory_usage);
if (success) {
@ -700,16 +700,16 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, struct sprite_tag tag
resource_close(&sheet_rs);
/* Initialize */
e->sheet = arena_push_no_zero(&e->arena, struct sprite_sheet);
*e->sheet = init_sheet_from_ase_result(&e->arena, decoded);
e->sheet = arena_push_no_zero(e->arena, struct sprite_sheet);
*e->sheet = init_sheet_from_ase_result(e->arena, decoded);
e->sheet->loaded = true;
e->sheet->valid = true;
success = true;
}
}
arena_set_readonly(&e->arena);
e->memory_usage = e->arena.committed;
arena_set_readonly(e->arena);
e->memory_usage = e->arena->committed;
atomic_u64_eval_add_u64(&G.cache.memory_usage, e->memory_usage);
if (success) {
@ -823,9 +823,9 @@ struct sprite_scope *sprite_scope_begin(void)
bins = res->ref_node_bins;
pool = res->ref_node_pool;
} else {
res = arena_push_no_zero(&G.scopes_arena, struct sprite_scope);
bins = arena_push_array_no_zero(&G.scopes_arena, struct sprite_scope_cache_ref *, CACHE_BINS_COUNT);
pool = arena_push_array_no_zero(&G.scopes_arena, struct sprite_scope_cache_ref, MAX_SCOPE_REFERENCES);
res = arena_push_no_zero(G.scopes_arena, struct sprite_scope);
bins = arena_push_array_no_zero(G.scopes_arena, struct sprite_scope_cache_ref *, CACHE_BINS_COUNT);
pool = arena_push_array_no_zero(G.scopes_arena, struct sprite_scope_cache_ref, MAX_SCOPE_REFERENCES);
}
}
atomic_i32_eval_exchange(&G.scopes_lock, 0);
@ -946,7 +946,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(struct sprite_scope
entry = G.cache.entry_pool_first_free;
G.cache.entry_pool_first_free = entry->next_free;
} else {
entry = arena_push_no_zero(&G.cache.arena, struct cache_entry);
entry = arena_push_no_zero(G.cache.arena, struct cache_entry);
}
sys_mutex_unlock(&pool_lock);
}
@ -1346,7 +1346,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
if (n->kind == CACHE_ENTRY_KIND_TEXTURE && n->texture->valid) {
gpu_release(n->texture->texture);
}
arena_release(&n->arena);
arena_release(n->arena);
}
}
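
Because the arena handle now lives inside the arena's own memory,
arena_release(n->arena) frees the storage the pointer refers to as well, so
the evictor must not touch n->arena afterwards. In the malloc-backed sketch
from earlier, release is just:

#include <stdlib.h>

/* Companion to the toy arena sketch above: header and data were a single
 * allocation, so freeing through the handle reclaims both at once. */
static void toy_arena_release(struct arena *a)
{
    free(a);
}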

View File

@@ -118,25 +118,25 @@ GLOBAL struct {
/* Condition variables */
struct sys_mutex condition_variables_mutex;
struct arena condition_variables_arena;
struct arena *condition_variables_arena;
struct win32_condition_variable *first_free_condition_variable;
/* Thread params */
struct sys_mutex threads_mutex;
struct arena threads_arena;
struct arena *threads_arena;
struct win32_thread *threads_first;
struct win32_thread *threads_last;
struct win32_thread *threads_first_free;
/* Watches */
struct sys_mutex watches_mutex;
struct arena watches_arena;
struct arena *watches_arena;
struct win32_watch *watches_first_free;
/* Windows */
WNDCLASSEXW window_class;
struct sys_mutex windows_mutex;
struct arena windows_arena;
struct arena *windows_arena;
struct win32_window *first_free_window;
} G = ZI, DEBUG_ALIAS(G, G_sys_win32);
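
The win32 globals repeat one grouping per subsystem: a mutex, a
pointer-handle arena, and an intrusive free list. Sketched as a single
reusable shape (hypothetical names; the engine keeps these as separate
fields rather than a struct):

#include <pthread.h>

struct arena;                            /* opaque handle, as above */
struct toy_node { struct toy_node *next_free; };

/* One pool per subsystem: the mutex guards both the free list and pushes
 * into the arena; nodes are recycled through the list, never freed. */
struct toy_pool {
    pthread_mutex_t mutex;
    struct arena *arena;
    struct toy_node *first_free;
};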
@@ -698,7 +698,7 @@ struct sys_watch sys_watch_alloc(struct string dir_path)
w32_watch = G.watches_first_free;
G.watches_first_free = w32_watch->next_free;
} else {
w32_watch = arena_push_no_zero(&G.watches_arena, struct win32_watch);
w32_watch = arena_push_no_zero(G.watches_arena, struct win32_watch);
}
}
sys_mutex_unlock(&lock);
@@ -1018,7 +1018,7 @@ INTERNAL struct win32_window *win32_window_alloc(void)
window = G.first_free_window;
G.first_free_window = window->next_free;
} else {
window = arena_push_no_zero(&G.windows_arena, struct win32_window);
window = arena_push_no_zero(G.windows_arena, struct win32_window);
}
sys_mutex_unlock(&lock);
}
@@ -1659,7 +1659,7 @@ INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void)
cv = G.first_free_condition_variable;
G.first_free_condition_variable = cv->next_free;
} else {
cv = arena_push(&G.condition_variables_arena, struct win32_condition_variable);
cv = arena_push(G.condition_variables_arena, struct win32_condition_variable);
}
sys_mutex_unlock(&lock);
}
@@ -1848,7 +1848,7 @@ INTERNAL struct win32_thread *win32_thread_alloc(void)
t = G.threads_first_free;
G.threads_first_free = t->next;
} else {
t = arena_push_no_zero(&G.threads_arena, struct win32_thread);
t = arena_push_no_zero(G.threads_arena, struct win32_thread);
}
MEMZERO_STRUCT(t);
if (!G.threads_first) {
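
Note the pairing in win32_thread_alloc: arena_push_no_zero on the slow
path, then one unconditional MEMZERO_STRUCT(t) after the branch. Recycled
nodes carry stale fields anyway, so zeroing once after either path is both
necessary and sufficient, and the arena skips a redundant clear. A sketch
of the idiom with malloc standing in for the no-zero push:

#include <stdlib.h>
#include <string.h>

struct toy_thread { struct toy_thread *next; int flags; };

static struct toy_thread *toy_first_free_thread;

static struct toy_thread *toy_thread_alloc(void)
{
    struct toy_thread *t;
    if (toy_first_free_thread) {
        t = toy_first_free_thread;  /* stale contents */
        toy_first_free_thread = t->next;
    } else {
        t = malloc(sizeof(*t));     /* uninitialized, like push_no_zero */
    }
    memset(t, 0, sizeof(*t));       /* one zeroing covers both paths */
    return t;
}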

View File

@@ -33,8 +33,8 @@ struct thread_local_store thread_local_store_alloc(void)
__prof;
struct thread_local_store t = ZI;
t.arena = arena_alloc(THREAD_LOCAL_STORE_RESERVE);
t.lookup = arena_push_array(&t.arena, void *, MAX_THREAD_LOCAL_VARS);
t.allocation_order = arena_push_array(&t.arena, u64, MAX_THREAD_LOCAL_VARS);
t.lookup = arena_push_array(t.arena, void *, MAX_THREAD_LOCAL_VARS);
t.allocation_order = arena_push_array(t.arena, u64, MAX_THREAD_LOCAL_VARS);
return t;
}
@@ -55,7 +55,7 @@ void thread_local_store_release(struct thread_local_store *t)
}
metas_unlock();
arena_release(&t->arena);
arena_release(t->arena);
}
void *_thread_local_var_eval(struct thread_local_var_meta *meta)
@@ -93,8 +93,8 @@ void *_thread_local_var_eval(struct thread_local_var_meta *meta)
if (!data) {
__profscope(_thread_local_var_eval__ALLOC);
/* Allocate */
arena_align(&t->arena, meta->align);
data = arena_push_array_no_zero(&t->arena, u8, meta->size);
arena_align(t->arena, meta->align);
data = arena_push_array_no_zero(t->arena, u8, meta->size);
if (meta->alloc) {
meta->alloc(data);
} else {
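
_thread_local_var_eval calls arena_align(t->arena, meta->align) before the
raw byte push because thread-local variables of arbitrary types need their
natural alignment, and a byte-granular arena position does not provide it
on its own. The assumed behavior of arena_align, sketched as a pure helper
(a real arena would assign pos = toy_align_up(pos, align)):

#include <stdint.h>

/* Round pos up to the next multiple of align; align must be a power of
 * two. Assumed semantics for arena_align, illustrative only. */
static uint64_t toy_align_up(uint64_t pos, uint64_t align)
{
    return (pos + align - 1) & ~(align - 1);
}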

View File

@@ -7,7 +7,7 @@
struct thread_local_store {
void **lookup;
struct arena arena;
struct arena *arena;
u64 allocation_order_count;
u64 *allocation_order;
};

View File

@@ -56,7 +56,7 @@ GLOBAL struct {
struct sys_thread local_sim_thread;
struct sim_ctx *local_sim_ctx;
struct arena arena;
struct arena *arena;
struct sys_window *window;
struct string connect_address_str;
@@ -89,7 +89,7 @@ GLOBAL struct {
/* Debug console */
struct sys_mutex console_logs_mutex;
struct arena console_logs_arena;
struct arena *console_logs_arena;
struct console_log *first_console_log;
struct console_log *last_console_log;
i32 console_log_color_indices[LOG_LEVEL_COUNT];
@@ -98,7 +98,7 @@ GLOBAL struct {
/* Window -> user */
struct sys_mutex sys_events_mutex;
struct arena sys_events_arena;
struct arena *sys_events_arena;
/* User -> local sim */
struct sys_mutex user_sim_cmd_mutex;
@@ -228,7 +228,7 @@ struct user_startup_receipt user_startup(struct work_startup_receipt *work_sr,
G.real_time_ns = sys_time_ns();
/* TODO: Remove this */
G.connect_address_str = string_copy(&G.arena, connect_address_str);
G.connect_address_str = string_copy(G.arena, connect_address_str);
/* Initialize average dt to a reasonable value */
G.average_local_to_user_snapshot_publish_dt_ns = NS_FROM_SECONDS(1) / SIM_TICKS_PER_SECOND;
@@ -304,11 +304,11 @@ INTERNAL struct sys_event_array pop_sys_events(struct arena *arena)
struct sys_event_array array = ZI;
struct sys_lock lock = sys_mutex_lock_e(&G.sys_events_mutex);
{
struct sys_event *src_events = (struct sys_event *)G.sys_events_arena.base;
array.count = G.sys_events_arena.pos / sizeof(*src_events);
struct sys_event *src_events = (struct sys_event *)G.sys_events_arena->base;
array.count = G.sys_events_arena->pos / sizeof(*src_events);
array.events = arena_push_array_no_zero(arena, struct sys_event, array.count);
MEMCPY(array.events, src_events, array.count * sizeof(*src_events));
arena_reset(&G.sys_events_arena);
arena_reset(G.sys_events_arena);
}
sys_mutex_unlock(&lock);
return array;
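
pop_sys_events treats the shared arena as a flat array of fixed-size
events: the count falls out of pos / sizeof(event), the whole batch is
copied into the caller's arena, and a reset empties the queue, all inside
one mutex hold so producers never observe a half-drained queue. A
standalone sketch of the same scheme over a plain buffer (names
hypothetical; capacity checks omitted):

#include <pthread.h>
#include <stdint.h>
#include <string.h>

struct toy_event { int kind; int data; };

static pthread_mutex_t toy_events_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct toy_event toy_events[1024]; /* stands in for the arena data */
static uint64_t toy_events_pos;           /* bytes used, like the arena pos */

static void toy_push_event(struct toy_event e)
{
    pthread_mutex_lock(&toy_events_mutex);
    memcpy((uint8_t *)toy_events + toy_events_pos, &e, sizeof(e));
    toy_events_pos += sizeof(e);
    pthread_mutex_unlock(&toy_events_mutex);
}

/* Drain: copy every pending event out, derive the count from the byte
 * position, and reset so the queue is empty for the next frame. */
static uint64_t toy_pop_events(struct toy_event *out /* >= 1024 slots */)
{
    pthread_mutex_lock(&toy_events_mutex);
    uint64_t count = toy_events_pos / sizeof(struct toy_event);
    memcpy(out, toy_events, (size_t)toy_events_pos);
    toy_events_pos = 0;
    pthread_mutex_unlock(&toy_events_mutex);
    return count;
}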
@@ -318,7 +318,7 @@ INTERNAL SYS_WINDOW_EVENT_CALLBACK_FUNC_DEF(window_event_callback, event)
{
struct sys_lock lock = sys_mutex_lock_e(&G.sys_events_mutex);
{
*arena_push_no_zero(&G.sys_events_arena, struct sys_event) = event;
*arena_push_no_zero(G.sys_events_arena, struct sys_event) = event;
}
sys_mutex_unlock(&lock);
}
@@ -464,9 +464,9 @@ INTERNAL LOG_EVENT_CALLBACK_FUNC_DEF(debug_console_log_callback, log)
{
struct sys_lock lock = sys_mutex_lock_e(&G.console_logs_mutex);
{
struct console_log *clog = arena_push(&G.console_logs_arena, struct console_log);
struct console_log *clog = arena_push(G.console_logs_arena, struct console_log);
clog->level = log.level;
clog->msg = string_copy(&G.console_logs_arena, log.msg);
clog->msg = string_copy(G.console_logs_arena, log.msg);
clog->datetime = log.datetime;
clog->time_ns = log.time_ns;
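
The log node and its message text land in the same arena via
string_copy(G.console_logs_arena, log.msg), so both share one lifetime and
a single arena reset reclaims them together. A hedged sketch of what
string_copy plausibly does; the string layout is an assumption, and
toy_arena_push_size is the push from the earlier toy arena sketch:

#include <stdint.h>
#include <string.h>

struct toy_string { char *data; uint64_t len; };

static struct toy_string toy_string_copy(struct arena *a, struct toy_string s)
{
    struct toy_string out;
    out.data = toy_arena_push_size(a, s.len); /* bytes live in the arena */
    out.len = s.len;
    memcpy(out.data, s.data, (size_t)s.len);
    return out;
}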

View File

@@ -62,7 +62,7 @@ struct work_task {
* ========================== */
GLOBAL struct {
struct arena arena;
struct arena *arena;
b32 workers_shutdown;
struct sys_mutex mutex;
@@ -125,7 +125,7 @@ struct work_startup_receipt work_startup(u32 num_worker_threads)
LIT("[P6] Worker %F"),
FMT_UINT(i));
struct worker *worker = arena_push(&G.arena, struct worker);
struct worker *worker = arena_push(G.arena, struct worker);
worker->thread = sys_thread_alloc(&worker_thread_entry_point, NULL, thread_name);
if (prev) {
prev->next = worker;
@@ -181,7 +181,7 @@ INTERNAL struct work *work_alloc_locked(struct sys_lock *lock)
};
} else {
/* Make new */
work = arena_push_no_zero(&G.arena, struct work);
work = arena_push_no_zero(G.arena, struct work);
*work = (struct work) {
.condition_variable_finished = sys_condition_variable_alloc(),
.gen = 1
@@ -226,7 +226,7 @@ INTERNAL struct work_task *task_alloc_locked(struct sys_lock *lock)
*task = (struct work_task) { 0 };
} else {
/* Make new */
task = arena_push(&G.arena, struct work_task);
task = arena_push(G.arena, struct work_task);
}
return task;
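
Fresh work items are stamped with .gen = 1, while the recycle branch above
is cut off before any generation handling is visible. The usual payoff of a
generation counter, stated here as an assumption rather than something this
diff shows, is cheap detection of stale references to recycled slots:

#include <stdbool.h>
#include <stdint.h>

struct toy_work { uint64_t gen; /* bumped each time the slot is recycled */ };

/* A handle remembers the generation it was issued against. */
struct toy_work_handle { struct toy_work *work; uint64_t gen; };

static bool toy_work_handle_valid(struct toy_work_handle h)
{
    return h.work && h.work->gen == h.gen; /* mismatch => slot was reused */
}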