diff --git a/src/arena.c b/src/arena.c index 685b40ee..6a54df47 100644 --- a/src/arena.c +++ b/src/arena.c @@ -5,69 +5,62 @@ #include "atomic.h" #include "gstat.h" -/* Arbitrary block size */ -#define HEADER_SIZE 64 -#define BLOCK_SIZE 4096 - -/* NOTE: Application will exit if arena fails to reserve or commit initial - * memory. */ +/* NOTE: Application will exit if arena fails to reserve or commit initial memory. */ struct arena *arena_alloc(u64 reserve) { __prof; - reserve += HEADER_SIZE; + reserve += ARENA_HEADER_SIZE; /* Round up to nearest block size */ - u64 block_remainder = reserve % BLOCK_SIZE; + u64 block_remainder = reserve % ARENA_BLOCK_SIZE; if (block_remainder > 0) { - reserve += BLOCK_SIZE - block_remainder; + reserve += ARENA_BLOCK_SIZE - block_remainder; } - u8 *reserve_base = sys_memory_reserve(reserve + HEADER_SIZE); + u8 *reserve_base = sys_memory_reserve(reserve + ARENA_HEADER_SIZE); if (!reserve_base) { /* Hard fail on memory reserve failure for now */ sys_panic(LIT("Failed to reserve memory")); } u64 reserved = reserve; - gstat_add(GSTAT_MEMORY_RESERVED, reserve + HEADER_SIZE); + gstat_add(GSTAT_MEMORY_RESERVED, reserve + ARENA_HEADER_SIZE); /* Commit initial block */ - u8 *base = sys_memory_commit(reserve_base, BLOCK_SIZE); + u8 *base = sys_memory_commit(reserve_base, ARENA_BLOCK_SIZE); if (!base) { /* Hard fail on commit failure */ sys_panic(LIT("Failed to commit initial memory block: System may be out of memory")); } - ASAN_POISON(base + sizeof(struct arena), BLOCK_SIZE - sizeof(struct arena)); + ASAN_POISON(base + sizeof(struct arena), ARENA_BLOCK_SIZE - sizeof(struct arena)); ASSERT(((u64)base & 0xFFF) == 0); /* Base should be 4k aligned */ - CT_ASSERT(HEADER_SIZE <= BLOCK_SIZE); /* Header must fit in first block */ - CT_ASSERT(sizeof(struct arena) <= HEADER_SIZE); /* Arena struct must fit in header */ - gstat_add(GSTAT_MEMORY_COMMITTED, BLOCK_SIZE); - __profalloc(reserve_base, BLOCK_SIZE); + CT_ASSERT(ARENA_HEADER_SIZE <= 
ARENA_BLOCK_SIZE); /* Header must fit in first block */ + CT_ASSERT(sizeof(struct arena) <= ARENA_HEADER_SIZE); /* Arena struct must fit in header */ + gstat_add(GSTAT_MEMORY_COMMITTED, ARENA_BLOCK_SIZE); + __profalloc(reserve_base, ARENA_BLOCK_SIZE); /* Create arena struct at end of header block */ struct arena *arena = (struct arena *)base; MEMZERO_STRUCT(arena); - base += HEADER_SIZE; - u64 committed = BLOCK_SIZE - HEADER_SIZE; + base += ARENA_HEADER_SIZE; + u64 committed = ARENA_BLOCK_SIZE - ARENA_HEADER_SIZE; gstat_add(GSTAT_NUM_ARENAS, 1); arena->committed = committed; - arena->reserve_base = reserve_base; - arena->base = base; arena->reserved = reserved; return arena; } void arena_release(struct arena *arena) { - ASAN_UNPOISON(arena->reserve_base, arena->committed + HEADER_SIZE); + ASAN_UNPOISON((u8 *)arena, arena->committed + ARENA_HEADER_SIZE); __prof; - __proffree(arena->reserve_base); + __proffree((u8 *)arena); gstat_sub(GSTAT_MEMORY_COMMITTED, arena->committed); gstat_sub(GSTAT_MEMORY_RESERVED, arena->reserved); gstat_sub(GSTAT_NUM_ARENAS, 1); - sys_memory_release(arena->reserve_base); + sys_memory_release((u8 *)arena); } /* NOTE: Application will exit if arena fails to commit memory */ @@ -87,14 +80,14 @@ void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align) if (new_pos > arena->committed) { __profscope(_arena_push_bytes_COMMIT); /* Commit new block(s) */ - u64 blocks_needed = (new_pos - arena->committed + BLOCK_SIZE - 1) / BLOCK_SIZE; - u64 commit_bytes = blocks_needed * BLOCK_SIZE; + u64 blocks_needed = (new_pos - arena->committed + ARENA_BLOCK_SIZE - 1) / ARENA_BLOCK_SIZE; + u64 commit_bytes = blocks_needed * ARENA_BLOCK_SIZE; u64 new_capacity = arena->committed + commit_bytes; if (new_capacity > arena->reserved) { /* Hard fail if we overflow reserved memory for now */ sys_panic(LIT("Failed to commit new memory block: Overflow of reserved memory")); } - void *commit_address = arena->base + arena->committed; + void *commit_address = arena_base(arena) + 
arena->committed; if (!sys_memory_commit(commit_address, commit_bytes)) { /* Hard fail on memory allocation failure for now */ sys_panic(LIT("Failed to commit new memory block: System may be out of memory")); @@ -102,14 +95,14 @@ void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align) arena->committed += commit_bytes; gstat_add(GSTAT_MEMORY_COMMITTED, commit_bytes); - __proffree(arena->reserve_base); + __proffree((u8 *)arena); - __profalloc(arena->reserve_base, arena->committed + commit_bytes + HEADER_SIZE); + __profalloc((u8 *)arena, arena->committed + ARENA_HEADER_SIZE); ASAN_POISON(commit_address, commit_bytes); } - start = arena->base + aligned_start_pos; + start = arena_base(arena) + aligned_start_pos; arena->pos = new_pos; - ASAN_UNPOISON(start, (arena->base + arena->pos) - (u8 *)start); + ASAN_UNPOISON(start, (arena_base(arena) + arena->pos) - (u8 *)start); } else { - start = arena->base + arena->pos; + start = arena_base(arena) + arena->pos; } return start; @@ -117,23 +110,23 @@ void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align) /* Copies the memory from the source arena into the destination arena, * replacing old contents. Destination arena will be expanded if necessary. 
*/ -void arena_copy_replace(struct arena *dest, struct arena *src) +void arena_copy_replace(struct arena *dst, struct arena *src) { - arena_reset(dest); + arena_reset(dst); u64 data_size = src->pos; - u8 *data_src = src->base; - u8 *data_dest = arena_push_bytes_no_zero(dest, data_size, 1); - MEMCPY(data_dest, data_src, data_size); + u8 *data_src = arena_base(src); + u8 *data_dst = arena_push_bytes_no_zero(dst, data_size, 1); + MEMCPY(data_dst, data_src, data_size); } void arena_decommit_unused_blocks(struct arena *arena) { #if 0 ASSERT(!arena->readonly); - u64 next_block_pos = BLOCK_SIZE * ((arena->pos + (BLOCK_SIZE - 1)) / BLOCK_SIZE); + u64 next_block_pos = ARENA_BLOCK_SIZE * ((arena->pos + (ARENA_BLOCK_SIZE - 1)) / ARENA_BLOCK_SIZE); if (arena->committed > next_block_pos) { - u8 *decommit_start = arena->base + next_block_pos; - u64 decommit_size = (arena->base + arena->committed) - decommit_start; + u8 *decommit_start = arena_base(arena) + next_block_pos; + u64 decommit_size = (arena_base(arena) + arena->committed) - decommit_start; sys_memory_decommit(decommit_start, decommit_size); arena->committed = next_block_pos; gstat_sub(GSTAT_MEMORY_COMMITTED, decommit_size); @@ -150,12 +143,12 @@ void arena_set_readonly(struct arena *arena) #if RTC arena->readonly = true; #endif - sys_memory_set_committed_readonly(arena->reserve_base, arena->committed + HEADER_SIZE); + sys_memory_set_committed_readonly((u8 *)arena, arena->committed + ARENA_HEADER_SIZE); } void arena_set_readwrite(struct arena *arena) { - sys_memory_set_committed_readwrite(arena->reserve_base, arena->committed + HEADER_SIZE); + sys_memory_set_committed_readwrite((u8 *)arena, arena->committed + ARENA_HEADER_SIZE); #if RTC arena->readonly = false; #endif diff --git a/src/arena.h b/src/arena.h index e5c8f8bf..c5e97514 100644 --- a/src/arena.h +++ b/src/arena.h @@ -3,14 +3,19 @@ #include "memory.h" +#define ARENA_HEADER_SIZE 64 +#define ARENA_BLOCK_SIZE 4096 + #define arena_push(a, type) ((type *)arena_push_bytes((a), 
sizeof(type), alignof(type))) #define arena_push_no_zero(a, type) ((type *)arena_push_bytes_no_zero((a), sizeof(type), alignof(type))) #define arena_push_array(a, type, n) ((type *)arena_push_bytes((a), (sizeof(type) * (n)), alignof(type))) #define arena_push_array_no_zero(a, type, n) ((type *)arena_push_bytes_no_zero((a), (sizeof(type) * (n)), alignof(type))) -#define arena_pop(a, type, dest) arena_pop_struct((a), sizeof(type), dest) -#define arena_pop_array(a, type, n, dest) arena_pop_struct((a), sizeof(type) * (n), dest) +#define arena_pop(a, type, dst) arena_pop_struct((a), sizeof(type), dst) +#define arena_pop_array(a, type, n, dst) arena_pop_struct((a), sizeof(type) * (n), dst) + +#define arena_base(a) ((u8 *)(a) + ARENA_HEADER_SIZE) /* Returns a pointer to where the next allocation would be (at alignment of type). * Equivalent to arena_push but without actually allocating anything or modifying the arena. */ @@ -28,7 +33,7 @@ struct arena_temp { struct arena *arena_alloc(u64 reserve); void arena_release(struct arena *arena); void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align); -void arena_copy_replace(struct arena *dest, struct arena *src); +void arena_copy_replace(struct arena *dst, struct arena *src); void arena_decommit_unused_blocks(struct arena *arena); void arena_set_readonly(struct arena *arena); void arena_set_readwrite(struct arena *arena); @@ -45,20 +50,20 @@ INLINE void arena_pop_to(struct arena *arena, u64 pos) ASSERT(arena->pos >= pos); ASSERT(!arena->readonly); - ASAN_POISON(arena->base + pos, arena->pos - pos); + ASAN_POISON(arena_base(arena) + pos, arena->pos - pos); arena->pos = pos; } -INLINE void arena_pop_struct(struct arena *arena, u64 size, void *copy_dest) +INLINE void arena_pop_struct(struct arena *arena, u64 size, void *copy_dst) { ASSERT(arena->pos >= size); ASSERT(!arena->readonly); u64 new_pos = arena->pos - size; - void *src = (void *)(arena->base + new_pos); - MEMCPY(copy_dest, src, size); + void *src = (void 
*)(arena_base(arena) + new_pos); + MEMCPY(copy_dst, src, size); - ASAN_POISON(arena->base + new_pos, arena->pos - new_pos); + ASAN_POISON(arena_base(arena) + new_pos, arena->pos - new_pos); arena->pos = new_pos; } @@ -72,12 +77,12 @@ INLINE void *arena_align(struct arena *arena, u64 align) if (align_bytes > 0) { return (void *)arena_push_array_no_zero(arena, u8, align_bytes); } else { - return (void *)(arena->base + arena->pos); + return (void *)(arena_base(arena) + arena->pos); } } else { /* 0 alignment */ ASSERT(false); - return (void *)(arena->base + arena->pos); + return (void *)(arena_base(arena) + arena->pos); } } @@ -102,7 +107,7 @@ INLINE void arena_reset(struct arena *arena) INLINE struct string arena_to_string(struct arena *arena) { struct string b; - b.text = arena->base; + b.text = arena_base(arena); b.len = arena->pos; return b; } @@ -111,7 +116,7 @@ INLINE void *_arena_push_dry(struct arena *arena, u64 align) { u64 aligned_start_pos = (arena->pos + (align - 1)); aligned_start_pos -= aligned_start_pos % align; - void *ptr = arena->base + aligned_start_pos; + void *ptr = arena_base(arena) + aligned_start_pos; return ptr; } diff --git a/src/bitbuff.c b/src/bitbuff.c index ad166aca..0a372c3e 100644 --- a/src/bitbuff.c +++ b/src/bitbuff.c @@ -119,7 +119,7 @@ struct bitbuff_writer bw_from_bitbuff(struct bitbuff *bb) struct bitbuff_writer res = ZI; res.bb = bb; if (bb->is_backed_by_arena) { - res.base = bb->arena->base; + res.base = arena_base(bb->arena); } else { res.base = bb->fixed_buffer.text; } @@ -383,7 +383,7 @@ struct bitbuff_reader br_from_bitbuff(struct bitbuff *bb) res.base_len = bb->fixed_buffer.len; } else { struct arena *arena = bb->arena; - res.base = arena->base; + res.base = arena_base(arena); res.base_len = arena->pos; } res.cur_bit = 0; diff --git a/src/buddy.c b/src/buddy.c index 0dcdb299..6c08e44f 100644 --- a/src/buddy.c +++ b/src/buddy.c @@ -124,7 +124,7 @@ INTERNAL struct buddy_block *buddy_block_get_unused(struct buddy_ctx *ctx, 
struc struct buddy_block *left = buddy_block_alloc_internal(ctx); left->used = true; left->level = level; - left->memory = arena->base; + left->memory = arena_base(arena); /* Create right (unused) block from new arena memory */ struct buddy_block *right = buddy_block_alloc_internal(ctx); diff --git a/src/common.h b/src/common.h index 24d577cf..c50e4a39 100644 --- a/src/common.h +++ b/src/common.h @@ -406,8 +406,6 @@ struct arena { u64 pos; u64 committed; u64 reserved; - u8 *base; - u8 *reserve_base; #if RTC b32 readonly; #endif diff --git a/src/scratch.h b/src/scratch.h index 72e6cf8b..43538f38 100644 --- a/src/scratch.h +++ b/src/scratch.h @@ -62,7 +62,7 @@ INLINE struct arena_temp _scratch_begin(struct arena *potential_conflict) struct scratch_ctx *ctx = (struct scratch_ctx *)thread_local_var_eval(&tl_scratch_ctx); struct arena *scratch_arena = ctx->arenas[0]; - if (potential_conflict && scratch_arena->base == potential_conflict->base) { + if (potential_conflict && scratch_arena == potential_conflict) { scratch_arena = ctx->arenas[1]; } struct arena_temp temp = arena_temp_begin(scratch_arena); diff --git a/src/space.c b/src/space.c index 550ec882..cfeefdf8 100644 --- a/src/space.c +++ b/src/space.c @@ -49,7 +49,7 @@ void space_release(struct space *space) void space_reset(struct space *space) { - arena_pop_to(space->entry_arena, (u64)space->entries - (u64)space->entry_arena->base); + arena_pop_to(space->entry_arena, (u64)space->entries - (u64)arena_base(space->entry_arena)); arena_reset(space->cell_arena); space->bins = arena_push_array(space->cell_arena, struct space_cell_bin, space->num_bins); space->num_entries_reserved = 0; diff --git a/src/sprite.c b/src/sprite.c index cf50cd10..fb9a3ab7 100644 --- a/src/sprite.c +++ b/src/sprite.c @@ -1283,7 +1283,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg) } /* Scratch arena should only contain evict array at this point */ - ASSERT(((scratch.arena->base + scratch.arena->pos) - 
(sizeof(*evict_array) * evict_array_count)) == (u8 *)evict_array); + ASSERT(((arena_base(scratch.arena) + scratch.arena->pos) - (sizeof(*evict_array) * evict_array_count)) == (u8 *)evict_array); /* Sort evict nodes */ { diff --git a/src/user.c b/src/user.c index d715e1e5..6f16cb5f 100644 --- a/src/user.c +++ b/src/user.c @@ -304,7 +304,7 @@ INTERNAL struct sys_event_array pop_sys_events(struct arena *arena) struct sys_event_array array = ZI; struct sys_lock lock = sys_mutex_lock_e(&G.sys_events_mutex); { - struct sys_event *src_events = (struct sys_event *)G.sys_events_arena->base; + struct sys_event *src_events = (struct sys_event *)arena_base(G.sys_events_arena); array.count = G.sys_events_arena->pos / sizeof(*src_events); array.events = arena_push_array_no_zero(arena, struct sys_event, array.count); MEMCPY(array.events, src_events, array.count * sizeof(*src_events));