#include "arena.h"
#include "sys.h"
#include "memory.h"
#include "string.h"
#include "atomic.h"
#include "gstat.h"

/* Accounting invariant used throughout this file:
 *   arena->committed counts DATA bytes only (header excluded);
 *   arena->reserved counts the full reservation (header included).
 * arena_base() is presumed to return the first data byte, i.e.
 * (u8 *)arena + ARENA_HEADER_SIZE -- TODO confirm against arena.h. */

/* NOTE: Application will exit if arena fails to reserve or commit initial memory. */
struct arena *arena_alloc(u64 reserve)
{
	__prof;

	/* Reserve the caller's request plus the header, rounded up to a
	 * whole number of blocks. */
	reserve += ARENA_HEADER_SIZE;
	u64 block_remainder = reserve % ARENA_BLOCK_SIZE;
	if (block_remainder > 0) {
		reserve += ARENA_BLOCK_SIZE - block_remainder;
	}

	u8 *base = sys_memory_reserve(reserve);
	if (!base) {
		/* Hard fail on memory reserve failure for now */
		sys_panic(LIT("Failed to reserve memory"));
	}
	u64 reserved = reserve;
	gstat_add(GSTAT_MEMORY_RESERVED, reserve);

	/* Commit initial block */
	base = sys_memory_commit(base, ARENA_BLOCK_SIZE);
	if (!base) {
		/* Hard fail on commit failure */
		sys_panic(LIT("Failed to commit initial memory block: System may be out of memory"));
	}

	ASSERT(((u64)base & 0xFFF) == 0);                     /* Base should be 4k aligned */
	CT_ASSERT(ARENA_HEADER_SIZE <= ARENA_BLOCK_SIZE);     /* Header must fit in first block */
	CT_ASSERT(sizeof(struct arena) <= ARENA_HEADER_SIZE); /* Arena struct must fit in header */

	__profalloc(base, ARENA_BLOCK_SIZE);
	ASAN_POISON(base + sizeof(struct arena), ARENA_BLOCK_SIZE - sizeof(struct arena));
	gstat_add(GSTAT_MEMORY_COMMITTED, ARENA_BLOCK_SIZE);
	gstat_add(GSTAT_NUM_ARENAS, 1);

	/* Create & return arena header at beginning of block */
	struct arena *arena = (struct arena *)base;
	MEMZERO_STRUCT(arena);
	arena->committed = ARENA_BLOCK_SIZE - ARENA_HEADER_SIZE;
	arena->reserved = reserved;
	return arena;
}

void arena_release(struct arena *arena)
{
	/* Unpoison the whole committed range before reading header fields below. */
	ASAN_UNPOISON(arena, arena->committed + ARENA_HEADER_SIZE);
	__prof;
	__proffree(arena);
	/* BUGFIX: arena_alloc adds ARENA_BLOCK_SIZE (header included) to the
	 * committed stat, but arena->committed excludes the header; subtracting
	 * only arena->committed leaked ARENA_HEADER_SIZE per released arena. */
	gstat_sub(GSTAT_MEMORY_COMMITTED, arena->committed + ARENA_HEADER_SIZE);
	gstat_sub(GSTAT_MEMORY_RESERVED, arena->reserved);
	gstat_sub(GSTAT_NUM_ARENAS, 1);
	sys_memory_release(arena);
}

/* NOTE: Application will exit if arena fails to commit memory */
void *arena_push_bytes_no_zero(struct arena *arena, u64 size, u64 align)
{
	ASSERT(align > 0);
	ASSERT(!arena->readonly);

	void *ptr = NULL;
	u8 *base = arena_base(arena);

	/* Check to avoid aligning when size = 0 */
	if (size > 0) {
		/* Round pos up to the requested alignment (works for any align > 0,
		 * not just powers of two). */
		u64 aligned_start_pos = (arena->pos + (align - 1));
		aligned_start_pos -= aligned_start_pos % align;
		u64 new_pos = aligned_start_pos + size;
		if (new_pos > arena->committed) {
			__profscope(_arena_push_bytes_COMMIT);

			/* Commit new block(s) */
			u64 blocks_needed = (new_pos - arena->committed + ARENA_BLOCK_SIZE - 1) / ARENA_BLOCK_SIZE;
			u64 commit_bytes = blocks_needed * ARENA_BLOCK_SIZE;
			u64 new_capacity = arena->committed + commit_bytes;
			/* BUGFIX: committed excludes the header while reserved includes
			 * it; committing new_capacity data bytes occupies
			 * new_capacity + ARENA_HEADER_SIZE bytes of the reservation, so
			 * the header must be part of the overflow check or we can commit
			 * past the reserved range. */
			if (new_capacity + ARENA_HEADER_SIZE > arena->reserved) {
				/* Hard fail if we overflow reserved memory for now */
				sys_panic(LIT("Failed to commit new memory block: Overflow of reserved memory"));
			}
			void *commit_address = base + arena->committed;
			if (!sys_memory_commit(commit_address, commit_bytes)) {
				/* Hard fail on memory allocation failure for now */
				sys_panic(LIT("Failed to commit new memory block: System may be out of memory"));
			}
			arena->committed += commit_bytes;
			gstat_add(GSTAT_MEMORY_COMMITTED, commit_bytes);
			__proffree(arena);
			/* BUGFIX: arena->committed was already bumped by commit_bytes
			 * above; adding commit_bytes again double-counted the new blocks
			 * in the profiler's allocation size. */
			__profalloc(arena, arena->committed + ARENA_HEADER_SIZE);
			ASAN_POISON(commit_address, commit_bytes);
		}
		ptr = base + aligned_start_pos;
		ASAN_UNPOISON(ptr, new_pos - aligned_start_pos);
		arena->pos = new_pos;
	} else {
		/* Zero-size push: return the current (possibly unaligned) cursor. */
		ptr = base + arena->pos;
	}
	return ptr;
}

/* Copies the memory from the source arena into the destination arena,
 * replacing old contents. Destination arena will be expanded if necessary.
*/ void arena_copy_replace(struct arena *dst, struct arena *src) { arena_reset(dst); u64 data_size = src->pos; u8 *data_src = arena_base(src); u8 *data_dst = arena_push_bytes_no_zero(dst, data_size, 1); MEMCPY(data_dst, data_src, data_size); } void arena_decommit_unused_blocks(struct arena *arena) { /* Not implemented */ ASSERT(false); (UNUSED)arena; } void arena_set_readonly(struct arena *arena) { #if RTC arena->readonly = true; #endif sys_memory_set_committed_readonly(arena, arena->committed + ARENA_HEADER_SIZE); } void arena_set_readwrite(struct arena *arena) { sys_memory_set_committed_readwrite(arena, arena->committed + ARENA_HEADER_SIZE); #if RTC arena->readonly = false; #endif }