base_core refactor

This commit is contained in:
jacob 2025-07-29 20:56:33 -05:00
parent 8dfd996cdf
commit 580447d3d5
77 changed files with 1353 additions and 1356 deletions

24
build.c
View File

@ -568,15 +568,15 @@ void OnBuild(StringList cli_args)
{
if (!arg_crtlib)
{
Error(Lit("CRTLIB (C runtime library) Must be enabled when compiling with RTC (runtime checks)"));
Error(Lit("CrtlibIsEnabled (C runtime library) Must be enabled when compiling with RtcIsEnabled (runtime checks)"));
OS_Exit(1);
}
StringListAppend(&perm, &compile_args, Lit("-DRTC=1"));
StringListAppend(&perm, &compile_args, Lit("-DRtcIsEnabled=1"));
if (arg_msvc)
{
if (!arg_asan)
{
/* Enable /RTC option (not compatible with ASAN) */
/* Enable /RtcIsEnabled option (not compatible with AsanIsEnabled) */
StringListAppend(&perm, &compile_args, Lit("/RTCcsu"));
}
}
@ -591,7 +591,7 @@ void OnBuild(StringList cli_args)
//- Crtlib
if (arg_crtlib)
{
StringListAppend(&perm, &compile_args, Lit("-DCRTLIB=1"));
StringListAppend(&perm, &compile_args, Lit("-DCrtlibIsEnabled=1"));
}
else
{
@ -610,7 +610,7 @@ void OnBuild(StringList cli_args)
//- Optimization
if (arg_unoptimized)
{
StringListAppend(&perm, &compile_args, Lit("-DUNOPTIMIZED=1"));
StringListAppend(&perm, &compile_args, Lit("-DUnoptimizedIsEnabled=1"));
if (arg_msvc)
{
StringListAppend(&perm, &compile_args, Lit("/Od"));
@ -638,7 +638,7 @@ void OnBuild(StringList cli_args)
//- Debug info
if (arg_debinfo)
{
StringListAppend(&perm, &compile_args, Lit("-DDEBINFO=1"));
StringListAppend(&perm, &compile_args, Lit("-DDebinfoEnabled=1"));
if (arg_msvc)
{
StringListAppend(&perm, &compile_args, Lit("/JMC /Zi"));
@ -656,10 +656,10 @@ void OnBuild(StringList cli_args)
{
if (!arg_crtlib)
{
Error(Lit("CRTLIB (C runtime library) Must be enabled when compiling with asan enabled"));
Error(Lit("CrtlibIsEnabled (C runtime library) Must be enabled when compiling with asan enabled"));
OS_Exit(1);
}
StringListAppend(&perm, &compile_args, Lit("-DASAN=1"));
StringListAppend(&perm, &compile_args, Lit("-DAsanIsEnabled=1"));
if (arg_msvc)
{
StringListAppend(&perm, &compile_args, Lit("/fsanitize=address"));
@ -673,7 +673,7 @@ void OnBuild(StringList cli_args)
//- Developer mode
if (arg_developer)
{
StringListAppend(&perm, &compile_args, Lit("-DDEVELOPER=1"));
StringListAppend(&perm, &compile_args, Lit("-DDeveloperIsEnabled=1"));
}
//- Profiling
@ -681,7 +681,7 @@ void OnBuild(StringList cli_args)
{
if (!arg_crtlib)
{
Error(Lit("CRTLIB (C runtime library) must be enabled when compiling with profiling enabled"));
Error(Lit("CrtlibIsEnabled (C runtime library) must be enabled when compiling with profiling enabled"));
OS_Exit(1);
}
if (arg_msvc)
@ -689,7 +689,7 @@ void OnBuild(StringList cli_args)
Error(Lit("MSVC not supported with profiling enabled (Profiling relies on Clang attributes)"));
OS_Exit(1);
}
StringListAppend(&perm, &compile_args, Lit("-DPROFILING=1"));
StringListAppend(&perm, &compile_args, Lit("-DProfilingIsEnabled=1"));
/* Tracy include path */
if (tracy_src_dir_path.len == 0 || !OS_DirExists(tracy_src_dir_path))
@ -705,7 +705,7 @@ void OnBuild(StringList cli_args)
if (!arg_msvc)
{
String incbin_dir = StringReplace(&perm, out_inc_dir_path, Lit("\\"), Lit("/"));
StringListAppend(&perm, &compile_args, StringF(&perm, Lit("-DINCBIN_DIR_RAW=\"%F\""), FmtStr(incbin_dir)));
StringListAppend(&perm, &compile_args, StringF(&perm, Lit("-DIncbinRawDir=\"%F\""), FmtStr(incbin_dir)));
}
}

View File

@ -1,13 +1,13 @@
GLOBAL struct {
Global struct {
Arena *arena;
String write_path;
} G = ZI, DEBUG_ALIAS(G, G_app);
} G = ZI, DebugAlias(G, G_app);
/* ========================== *
* Write directory
* ========================== */
INTERNAL String initialize_write_directory(Arena *arena, String write_dir)
internal String initialize_write_directory(Arena *arena, String write_dir)
{
TempArena scratch = BeginScratch(arena);
@ -41,7 +41,7 @@ String app_write_path_cat(Arena *arena, String filename)
* Default settings
* ========================== */
INTERNAL P_WindowSettings default_window_settings(P_Window *window)
internal P_WindowSettings default_window_settings(P_Window *window)
{
__prof;
@ -78,7 +78,7 @@ struct app_arg_list {
};
/* TODO: Remove this and do real argument parsing */
INTERNAL struct app_arg_list parse_args(Arena *arena, String args_str)
internal struct app_arg_list parse_args(Arena *arena, String args_str)
{
struct app_arg_list res = ZI;
i64 mode = 0;
@ -178,17 +178,17 @@ void P_AppStartup(String args_str)
#if !RTC
#if !RtcIsEnabled
/* Verify test modes aren't left on by accident in release mode */
STATIC_ASSERT(BB_DebugIsEnabled == 0);
STATIC_ASSERT(BITBUFF_TEST == 0);
StaticAssert(BB_DebugIsEnabled == 0);
StaticAssert(BITBUFF_TEST == 0);
#endif
#if BITBUFF_TEST
BB_Test();
#endif
G.arena = AllocArena(GIBI(64));
G.arena = AllocArena(Gibi(64));
G.write_path = initialize_write_directory(G.arena, LIT(WRITE_DIR));

View File

@ -9,9 +9,9 @@ struct huff_bb {
u64 cur_bit;
};
INTERNAL u32 peek_bits(struct huff_bb *bb, u32 nbits)
internal u32 peek_bits(struct huff_bb *bb, u32 nbits)
{
ASSERT(nbits <= 32);
Assert(nbits <= 32);
u64 cur_byte = bb->cur_bit >> 3;
u8 bit_index = bb->cur_bit % 8;
@ -20,19 +20,19 @@ INTERNAL u32 peek_bits(struct huff_bb *bb, u32 nbits)
u64 val64 = 0;
MEMCPY(&val64, &bb->data[cur_byte], nbytes);
u32 val32 = (u32)(val64 >> bit_index);
val32 &= U32_MAX >> (32 - nbits);
val32 &= U32Max >> (32 - nbits);
return val32;
}
INTERNAL u32 consume_bits(struct huff_bb *bb, u32 nbits)
internal u32 consume_bits(struct huff_bb *bb, u32 nbits)
{
u32 val = peek_bits(bb, nbits);
bb->cur_bit += nbits;
return val;
}
INTERNAL void skip_bits(struct huff_bb *bb, u32 nbits)
internal void skip_bits(struct huff_bb *bb, u32 nbits)
{
bb->cur_bit += nbits;
}
@ -61,11 +61,11 @@ struct huffman {
struct huffman_entry *entries;
};
GLOBAL READONLY u32 g_hclen_order[] = {
Global Readonly u32 g_hclen_order[] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
};
GLOBAL READONLY struct huffman_entry g_length_table[] = {
Global Readonly struct huffman_entry g_length_table[] = {
{3, 0}, /* 257 */
{4, 0}, /* 258 */
{5, 0}, /* 259 */
@ -97,7 +97,7 @@ GLOBAL READONLY struct huffman_entry g_length_table[] = {
{258, 0}, /* 285 */
};
GLOBAL READONLY struct huffman_entry g_dist_table[] = {
Global Readonly struct huffman_entry g_dist_table[] = {
{1, 0}, /* 0 */
{2, 0}, /* 1 */
{3, 0}, /* 2 */
@ -130,7 +130,7 @@ GLOBAL READONLY struct huffman_entry g_dist_table[] = {
{24577, 13}, /* 29 */
};
GLOBAL READONLY u32 g_fixed_bl_counts[][2] = {
Global Readonly u32 g_fixed_bl_counts[][2] = {
{143, 8},
{255, 9},
{279, 7},
@ -138,7 +138,7 @@ GLOBAL READONLY u32 g_fixed_bl_counts[][2] = {
{319, 5},
};
INTERNAL u32 reverse_bits(u32 v, u32 bit_count)
internal u32 reverse_bits(u32 v, u32 bit_count)
{
/* 7 & 15 seem to be the most common bit_counts, so a
* more optimal path is layed out for them. */
@ -172,7 +172,7 @@ INTERNAL u32 reverse_bits(u32 v, u32 bit_count)
}
}
INTERNAL struct huffman huffman_init(Arena *arena, u32 max_code_bits, u32 *bl_counts, u32 bl_counts_count)
internal struct huffman huffman_init(Arena *arena, u32 max_code_bits, u32 *bl_counts, u32 bl_counts_count)
{
__prof;
@ -184,7 +184,7 @@ INTERNAL struct huffman huffman_init(Arena *arena, u32 max_code_bits, u32 *bl_co
u32 code_length_hist[HUFFMAN_BIT_COUNT] = ZI;
for (u32 i = 0; i < bl_counts_count; ++i) {
u32 count = bl_counts[i];
ASSERT(count <= countof(code_length_hist));
Assert(count <= countof(code_length_hist));
++code_length_hist[count];
}
@ -198,7 +198,7 @@ INTERNAL struct huffman huffman_init(Arena *arena, u32 max_code_bits, u32 *bl_co
for (u32 i = 0; i < bl_counts_count; ++i) {
u32 code_bits = bl_counts[i];
if (code_bits) {
ASSERT(code_bits < countof(next_code));
Assert(code_bits < countof(next_code));
u32 code = next_code[code_bits]++;
u32 arbitrary_bits = res.max_code_bits - code_bits;
u32 entry_count = (1 << arbitrary_bits);
@ -216,20 +216,20 @@ INTERNAL struct huffman huffman_init(Arena *arena, u32 max_code_bits, u32 *bl_co
return res;
}
INTERNAL u16 huffman_decode(struct huffman *huffman, struct huff_bb *bb)
internal u16 huffman_decode(struct huffman *huffman, struct huff_bb *bb)
{
u32 index = peek_bits(bb, huffman->max_code_bits);
ASSERT(index < huffman->entries_count);
Assert(index < huffman->entries_count);
struct huffman_entry *entry = &huffman->entries[index];
u16 res = entry->symbol;
skip_bits(bb, entry->bits_used);
ASSERT(entry->bits_used > 0);
Assert(entry->bits_used > 0);
return res;
}
INTERNAL void inflate(u8 *dst, u8 *encoded)
internal void inflate(u8 *dst, u8 *encoded)
{
TempArena scratch = BeginScratchNoConflict();
@ -239,19 +239,19 @@ INTERNAL void inflate(u8 *dst, u8 *encoded)
/* ZLIB header */
u32 cm = consume_bits(&bb, 4);
u32 cinfo = consume_bits(&bb, 4);
ASSERT(cm == 8);
ASSERT(cinfo == 7);
Assert(cm == 8);
Assert(cinfo == 7);
u32 fcheck = consume_bits(&bb, 5);
u32 fdict = consume_bits(&bb, 1);
u32 flevl = consume_bits(&bb, 2);
ASSERT(fdict == 0);
Assert(fdict == 0);
u8 cmf = (u8)(cm | (cinfo << 4));
u8 flg = fcheck | (fdict << 5) | (flevl << 6);
(UNUSED)cmf;
(UNUSED)flg;
ASSERT(((cmf * 256) + flg) % 31 == 0);
Assert(((cmf * 256) + flg) % 31 == 0);
u8 bfinal = 0;
while (!bfinal) {
@ -262,7 +262,7 @@ INTERNAL void inflate(u8 *dst, u8 *encoded)
skip_bits(&bb, (8 - (bb.cur_bit % 8)) % 8);
i16 len = consume_bits(&bb, 16);
i16 nlen = consume_bits(&bb, 16);
ASSERT(len == ~nlen); /* Validation */
Assert(len == ~nlen); /* Validation */
(UNUSED)nlen;
while (len-- > 0) {
*dst++ = consume_bits(&bb, 8);
@ -295,7 +295,7 @@ INTERNAL void inflate(u8 *dst, u8 *encoded)
/* Decode dict huffman */
u32 lit_len_count = 0;
u32 len_count = hlit + hdist;
ASSERT(len_count <= countof(lit_len_dist_table));
Assert(len_count <= countof(lit_len_dist_table));
while (lit_len_count < len_count) {
u32 rep_count = 1;
u32 rep_val = 0;
@ -304,7 +304,7 @@ INTERNAL void inflate(u8 *dst, u8 *encoded)
rep_val = encoded_len;
} else if (encoded_len == 16) {
rep_count = 3 + consume_bits(&bb, 2);
ASSERT(lit_len_count > 0);
Assert(lit_len_count > 0);
rep_val = lit_len_dist_table[lit_len_count - 1];
} else if (encoded_len == 17) {
rep_count = 3 + consume_bits(&bb, 3);
@ -312,14 +312,14 @@ INTERNAL void inflate(u8 *dst, u8 *encoded)
rep_count = 11 + consume_bits(&bb, 7);
} else {
/* Invalid len */
ASSERT(0);
Assert(0);
}
while (rep_count--) {
lit_len_dist_table[lit_len_count++] = rep_val;
}
}
ASSERT(lit_len_count == len_count);
Assert(lit_len_count == len_count);
} else {
/* Fixed table */
hlit = 288;
@ -372,7 +372,7 @@ INTERNAL void inflate(u8 *dst, u8 *encoded)
case BLOCK_TYPE_RESERVED: {
/* TODO */
ASSERT(0);
Assert(0);
} break;
}
}
@ -408,7 +408,7 @@ enum cel_type {
CEL_TYPE_COMPRESSED_TILEMAP = 3
};
PACK(struct ase_header {
Packed(struct ase_header {
u32 file_size;
u16 magic;
u16 frames;
@ -431,7 +431,7 @@ PACK(struct ase_header {
u8 _4[84];
});
PACK(struct frame_header {
Packed(struct frame_header {
u32 bytes;
u16 magic;
u16 chunks_old;
@ -440,7 +440,7 @@ PACK(struct frame_header {
u32 chunks_new;
});
INTERNAL void push_error_copy_msg(Arena *arena, Ase_ErrorList *list, String msg_src)
internal void push_error_copy_msg(Arena *arena, Ase_ErrorList *list, String msg_src)
{
Ase_Error *e = PushStruct(arena, Ase_Error);
e->msg = string_copy(arena, msg_src);
@ -492,13 +492,13 @@ struct cel {
/* Taken from
* https://github.com/RandyGaul/cute_headers/blob/master/cute_aseprite.h#L870 */
INTERNAL u32 mul_u8(u32 a, u32 b)
internal u32 mul_u8(u32 a, u32 b)
{
u32 t = (a * b) + 0x80;
return ((t >> 8) + t) >> 8;
}
INTERNAL u32 blend(u32 src, u32 dst, u8 opacity)
internal u32 blend(u32 src, u32 dst, u8 opacity)
{
u32 dst_r = (dst & 0xff);
u32 dst_g = (dst >> 8) & 0xff;
@ -524,7 +524,7 @@ INTERNAL u32 blend(u32 src, u32 dst, u8 opacity)
return r | (g << 8) | (b << 16) | (a << 24);
}
INTERNAL void make_image_dimensions_squareish(struct ase_header *header, u32 *frames_x, u32 *frames_y, u64 *image_width, u64 *image_height)
internal void make_image_dimensions_squareish(struct ase_header *header, u32 *frames_x, u32 *frames_y, u64 *image_width, u64 *image_height)
{
/* Try and get image resolution into as much of a square as possible by
* separating frames into multiple rows. */
@ -578,10 +578,10 @@ Ase_DecodedImage ase_decode_image(Arena *arena, String encoded)
u64 image_height = frame_height * frames_y;
make_image_dimensions_squareish(&ase_header, &frames_x, &frames_y, &image_width, &image_height);
res.image.width = image_width;
res.image.height = image_height;
res.width = image_width;
res.height = image_height;
/* TODO: Optimize this. Naive memset(0) is bloating the decode time for large images. */
res.image.pixels = PushArray(arena, u32, image_width * image_height);
res.pixels = PushArray(arena, u32, image_width * image_height);
u32 num_layers = 0;
struct layer *layer_head = 0;
@ -597,7 +597,7 @@ Ase_DecodedImage ase_decode_image(Arena *arena, String encoded)
u32 num_chunks = frame_header.chunks_new;
if (num_chunks == 0) {
ASSERT(frame_header.chunks_old != 0xFFFF);
Assert(frame_header.chunks_old != 0xFFFF);
num_chunks = frame_header.chunks_old;
}
@ -607,7 +607,7 @@ Ase_DecodedImage ase_decode_image(Arena *arena, String encoded)
enum chunk_type chunk_type = BB_ReadUBits(&br, 16);
/* Chunk size includes size & type */
ASSERT(chunk_size >= 6);
Assert(chunk_size >= 6);
chunk_size -= 6;
u64 chunk_end_pos = BB_GetCurrentReaderByte(&br) + chunk_size;
@ -781,7 +781,7 @@ Ase_DecodedImage ase_decode_image(Arena *arena, String encoded)
for (i32 cel_x = cel_left; cel_x < cel_right; ++cel_x) {
i32 image_x = image_left + cel_x;
u32 cel_pixel = cel->pixels[cel_x + cel_stride];
u32 *image_pixel = &res.image.pixels[image_x + image_stride];
u32 *image_pixel = &res.pixels[image_x + image_stride];
*image_pixel = blend(cel_pixel, *image_pixel, opacity);
}
}
@ -789,8 +789,8 @@ Ase_DecodedImage ase_decode_image(Arena *arena, String encoded)
}
}
/* ASSERT all data was read */
ASSERT(BB_NumBytesRemaining(&br) == 0);
/* Assert all data was read */
Assert(BB_NumBytesRemaining(&br) == 0);
abort:
@ -842,7 +842,7 @@ Ase_DecodedSheet ase_decode_sheet(Arena *arena, String encoded)
u32 num_chunks = frame_header.chunks_new;
if (num_chunks == 0) {
ASSERT(frame_header.chunks_old != 0xFFFF);
Assert(frame_header.chunks_old != 0xFFFF);
num_chunks = frame_header.chunks_old;
}
@ -874,7 +874,7 @@ Ase_DecodedSheet ase_decode_sheet(Arena *arena, String encoded)
enum chunk_type chunk_type = BB_ReadUBits(&br, 16);
/* Chunk size includes size & type */
ASSERT(chunk_size >= 6);
Assert(chunk_size >= 6);
chunk_size -= 6;
u64 chunk_end_pos = BB_GetCurrentReaderByte(&br) + chunk_size;
@ -957,8 +957,8 @@ Ase_DecodedSheet ase_decode_sheet(Arena *arena, String encoded)
++num_frames;
}
/* ASSERT all data was read */
ASSERT(BB_NumBytesRemaining(&br) == 0);
/* Assert all data was read */
Assert(BB_NumBytesRemaining(&br) == 0);
res.image_size = V2FromXY(image_width, image_height);
res.frame_size = V2FromXY(frame_width, frame_height);

View File

@ -43,7 +43,9 @@ Struct(Ase_Frame) {
};
Struct(Ase_DecodedImage) {
ImageDataRgba image;
u32 width;
u32 height;
u32 *pixels; /* Array of [width * height] pixels */
Ase_ErrorList errors;
b32 success;
};

View File

@ -5,7 +5,7 @@
#define MAX_ASSETS 1024
#define ASSET_LOOKUP_TABLE_CAPACITY (MAX_ASSETS * 4)
GLOBAL struct {
Global struct {
P_Mutex lookup_mutex;
AC_Asset lookup[ASSET_LOOKUP_TABLE_CAPACITY];
u64 num_assets;
@ -13,13 +13,13 @@ GLOBAL struct {
P_Mutex store_mutex;
Arena *store_arena;
#if RTC
#if RtcIsEnabled
/* Array of len `num_assets` pointing into populated entries of `lookup`. */
AC_Asset *dbg_table[ASSET_LOOKUP_TABLE_CAPACITY];
u64 dbg_table_count;
P_Mutex dbg_table_mutex;
#endif
} G = ZI, DEBUG_ALIAS(G, G_asset_cache);
} G = ZI, DebugAlias(G, G_asset_cache);
/* ========================== *
* Startup
@ -29,7 +29,7 @@ AC_StartupReceipt asset_cache_startup(void)
{
__prof;
/* Init store */
G.store_arena = AllocArena(GIBI(64));
G.store_arena = AllocArena(Gibi(64));
return (AC_StartupReceipt) { 0 };
}
@ -37,9 +37,9 @@ AC_StartupReceipt asset_cache_startup(void)
* Lookup
* ========================== */
INTERNAL void refresh_dbg_table(void)
internal void refresh_dbg_table(void)
{
#if RTC
#if RtcIsEnabled
P_Lock lock = P_LockE(&G.dbg_table_mutex);
MEMZERO_ARRAY(G.dbg_table);
G.dbg_table_count = 0;
@ -55,7 +55,7 @@ INTERNAL void refresh_dbg_table(void)
/* Returns first matching slot or first empty slot if not found.
* Check returned slot->hash != 0 for presence. */
INTERNAL AC_Asset *asset_cache_get_slot_locked(P_Lock *lock, String key, u64 hash)
internal AC_Asset *asset_cache_get_slot_locked(P_Lock *lock, String key, u64 hash)
{
P_AssertLockedES(lock, &G.lookup_mutex);
(UNUSED)lock;

View File

@ -24,7 +24,7 @@ Struct(AC_Asset) {
Struct(AC_Store) {
Arena *arena;
/* Internal */
/* internal */
P_Lock lock;
};

View File

@ -34,12 +34,12 @@ Arena *AllocArena(u64 reserve)
(*(volatile int *)0) = 0;
}
ASSERT(((u64)base & 0xFFF) == 0); /* Base should be 4k aligned */
STATIC_ASSERT(ArenaHeaderSize <= ArenaBlockSize); /* Header must fit in first block */
STATIC_ASSERT(sizeof(Arena) <= ArenaHeaderSize); /* Arena struct must fit in header */
Assert(((u64)base & 0xFFF) == 0); /* Base should be 4k aligned */
StaticAssert(ArenaHeaderSize <= ArenaBlockSize); /* Header must fit in first block */
StaticAssert(sizeof(Arena) <= ArenaHeaderSize); /* Arena struct must fit in header */
__profalloc(base, ArenaBlockSize);
ASAN_POISON(base + sizeof(Arena), ArenaBlockSize - sizeof(Arena));
AsanPoison(base + sizeof(Arena), ArenaBlockSize - sizeof(Arena));
gstat_add(GSTAT_MEMORY_COMMITTED, ArenaBlockSize);
gstat_add(GSTAT_NUM_ARENAS, 1);
@ -53,7 +53,7 @@ Arena *AllocArena(u64 reserve)
void ReleaseArena(Arena *arena)
{
ASAN_UNPOISON(arena, arena->committed + ArenaHeaderSize);
AsanUnpoison(arena, arena->committed + ArenaHeaderSize);
__prof;
__proffree(arena);
gstat_add(GSTAT_MEMORY_COMMITTED, -(i64)(arena->committed - ArenaHeaderSize));
@ -65,8 +65,8 @@ void ReleaseArena(Arena *arena)
/* NOTE: Application will exit if arena fails to commit memory */
void *PushBytesNoZero(Arena *arena, u64 size, u64 align)
{
ASSERT(align > 0);
ASSERT(!arena->readonly);
Assert(align > 0);
Assert(!arena->readonly);
void *ptr = 0;
u8 *base = ArenaBase(arena);
@ -104,11 +104,11 @@ void *PushBytesNoZero(Arena *arena, u64 size, u64 align)
gstat_add(GSTAT_MEMORY_COMMITTED, commit_bytes);
__proffree(arena);
__profalloc(arena, arena->committed + ArenaHeaderSize);
ASAN_POISON(commit_address, commit_bytes);
AsanPoison(commit_address, commit_bytes);
}
ptr = base + aligned_start_pos;
ASAN_UNPOISON(ptr, new_pos - aligned_start_pos);
AsanUnpoison(ptr, new_pos - aligned_start_pos);
arena->pos = new_pos;
}
else
@ -133,13 +133,13 @@ void CopyArena(Arena *dst, Arena *src)
void ShrinkArena(Arena *arena)
{
/* Not implemented */
ASSERT(0);
Assert(0);
(UNUSED)arena;
}
void SetArenaReadonly(Arena *arena)
{
#if RTC
#if RtcIsEnabled
arena->readonly = 1;
#endif
memory_set_committed_readonly(arena, arena->committed + ArenaHeaderSize);
@ -148,7 +148,7 @@ void SetArenaReadonly(Arena *arena)
void SetArenaReadWrite(Arena *arena)
{
memory_set_committed_readwrite(arena, arena->committed + ArenaHeaderSize);
#if RTC
#if RtcIsEnabled
arena->readonly = 0;
#endif
}

View File

@ -9,7 +9,7 @@ Struct(Arena)
u64 pos;
u64 committed;
u64 reserved;
#if RTC
#if RtcIsEnabled
b32 readonly;
#endif
};
@ -18,7 +18,7 @@ Struct(TempArena) {
Arena *arena;
u64 start_pos;
#if RTC
#if RtcIsEnabled
u64 scratch_id;
#endif
};
@ -61,41 +61,41 @@ extern SharedScratchCtx shared_scratch_ctx;
void *PushBytesNoZero(Arena *arena, u64 size, u64 align);
INLINE void *PushBytes(Arena *arena, u64 size, u64 align)
Inline void *PushBytes(Arena *arena, u64 size, u64 align)
{
void *p = PushBytesNoZero(arena, size, align);
MEMZERO(p, size);
return p;
}
INLINE u8 *ArenaBase(Arena *arena)
Inline u8 *ArenaBase(Arena *arena)
{
return (u8 *)arena + ArenaHeaderSize;
}
INLINE void PopTo(Arena *arena, u64 pos)
Inline void PopTo(Arena *arena, u64 pos)
{
ASSERT(arena->pos >= pos);
ASSERT(!arena->readonly);
Assert(arena->pos >= pos);
Assert(!arena->readonly);
ASAN_POISON(ArenaBase(arena) + pos, arena->pos - pos);
AsanPoison(ArenaBase(arena) + pos, arena->pos - pos);
arena->pos = pos;
}
INLINE void PopBytes(Arena *arena, u64 size, void *copy_dst)
Inline void PopBytes(Arena *arena, u64 size, void *copy_dst)
{
ASSERT(arena->pos >= size);
ASSERT(!arena->readonly);
Assert(arena->pos >= size);
Assert(!arena->readonly);
u64 new_pos = arena->pos - size;
void *src = (void *)(ArenaBase(arena) + new_pos);
MEMCPY(copy_dst, src, size);
ASAN_POISON(ArenaBase(arena) + new_pos, arena->pos - new_pos);
AsanPoison(ArenaBase(arena) + new_pos, arena->pos - new_pos);
arena->pos = new_pos;
}
INLINE void *_PushDry(Arena *arena, u64 align)
Inline void *_PushDry(Arena *arena, u64 align)
{
u64 aligned_start_pos = (arena->pos + (align - 1));
aligned_start_pos -= aligned_start_pos % align;
@ -114,9 +114,9 @@ void ShrinkArena(Arena *arena);
void SetArenaReadonly(Arena *arena);
void SetArenaReadWrite(Arena *arena);
INLINE void *AlignArena(Arena *arena, u64 align)
Inline void *AlignArena(Arena *arena, u64 align)
{
ASSERT(!arena->readonly);
Assert(!arena->readonly);
if (align > 0) {
u64 aligned_start_pos = (arena->pos + (align - 1));
aligned_start_pos -= aligned_start_pos % align;
@ -128,12 +128,12 @@ INLINE void *AlignArena(Arena *arena, u64 align)
}
} else {
/* 0 alignment */
ASSERT(0);
Assert(0);
return (void *)(ArenaBase(arena) + arena->pos);
}
}
INLINE void ResetArena(Arena *arena)
Inline void ResetArena(Arena *arena)
{
PopTo(arena, 0);
}
@ -141,7 +141,7 @@ INLINE void ResetArena(Arena *arena)
////////////////////////////////
//~ Temp arena
INLINE TempArena BeginTempArena(Arena *arena)
Inline TempArena BeginTempArena(Arena *arena)
{
TempArena t = ZI;
t.arena = arena;
@ -149,7 +149,7 @@ INLINE TempArena BeginTempArena(Arena *arena)
return t;
}
INLINE void EndTempArena(TempArena temp)
Inline void EndTempArena(TempArena temp)
{
PopTo(temp.arena, temp.start_pos);
}
@ -157,13 +157,13 @@ INLINE void EndTempArena(TempArena temp)
////////////////////////////////
//~ Scratch
INLINE ScratchCtx *ScratchCtxFromFiberId(i16 fiber_id)
Inline ScratchCtx *ScratchCtxFromFiberId(i16 fiber_id)
{
SharedScratchCtx *shared = &shared_scratch_ctx;
ScratchCtx *ctx = &shared->scratch_contexts[fiber_id];
if (!ctx->arenas[0]) {
for (i32 i = 0; i < (i32)countof(ctx->arenas); ++i) {
ctx->arenas[i] = AllocArena(GIBI(64));
ctx->arenas[i] = AllocArena(Gibi(64));
}
}
return ctx;
@ -179,13 +179,13 @@ INLINE ScratchCtx *ScratchCtxFromFiberId(i16 fiber_id)
* scope that could potentially be a scratch arena from another scope. */
#define BeginScratch(potential_conflict) _BeginScratch(potential_conflict)
INLINE TempArena _BeginScratch(Arena *potential_conflict)
Inline TempArena _BeginScratch(Arena *potential_conflict)
{
/* This function is currently hard-coded to support 2 scratch arenas */
STATIC_ASSERT(ScratchArenasPerCtx == 2);
StaticAssert(ScratchArenasPerCtx == 2);
/* Use `BeginScratchNoConflict` if no conflicts are present */
ASSERT(potential_conflict != 0);
Assert(potential_conflict != 0);
ScratchCtx *ctx = ScratchCtxFromFiberId(FiberId());
Arena *scratch_arena = ctx->arenas[0];
@ -208,7 +208,7 @@ INLINE TempArena _BeginScratch(Arena *potential_conflict)
(UNUSED)arena; \
} while (0)
INLINE TempArena _BeginScratchNoConflict(void)
Inline TempArena _BeginScratchNoConflict(void)
{
ScratchCtx *ctx = ScratchCtxFromFiberId(FiberId());
Arena *scratch_arena = ctx->arenas[0];
@ -216,7 +216,7 @@ INLINE TempArena _BeginScratchNoConflict(void)
return temp;
}
INLINE void EndScratch(TempArena scratch_temp)
Inline void EndScratch(TempArena scratch_temp)
{
EndTempArena(scratch_temp);
}

View File

@ -31,57 +31,57 @@ AlignedStruct(Atomic8Padded, 64)
Atomic8 v;
u8 _pad[60];
};
STATIC_ASSERT(sizeof(Atomic8Padded) == 64 && alignof(Atomic8Padded) == 64);
StaticAssert(sizeof(Atomic8Padded) == 64 && alignof(Atomic8Padded) == 64);
AlignedStruct(Atomic16Padded, 64)
{
Atomic16 v;
u8 _pad[60];
};
STATIC_ASSERT(sizeof(Atomic16Padded) == 64 && alignof(Atomic16Padded) == 64);
StaticAssert(sizeof(Atomic16Padded) == 64 && alignof(Atomic16Padded) == 64);
AlignedStruct(Atomic32Padded, 64)
{
Atomic32 v;
u8 _pad[60];
};
STATIC_ASSERT(sizeof(Atomic32Padded) == 64 && alignof(Atomic32Padded) == 64);
StaticAssert(sizeof(Atomic32Padded) == 64 && alignof(Atomic32Padded) == 64);
AlignedStruct(Atomic64Padded, 64)
{
Atomic64 v;
u8 _pad[56];
};
STATIC_ASSERT(sizeof(Atomic64Padded) == 64 && alignof(Atomic64Padded) == 64);
StaticAssert(sizeof(Atomic64Padded) == 64 && alignof(Atomic64Padded) == 64);
////////////////////////////////
//~ Atomic impl
#if PLATFORM_WINDOWS
#if PlatformIsWindows
FORCE_INLINE i8 atomic8_fetch(Atomic8 *x) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, 0, 0); }
FORCE_INLINE i8 atomic8_fetch_set(Atomic8 *x, i8 e) { return (i8)_InterlockedExchange8((char *)&x->_v, e); }
FORCE_INLINE i8 atomic8_fetch_test_set(Atomic8 *x, i8 c, i8 e) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, e, c); }
FORCE_INLINE i8 atomic8_fetch_xor(Atomic8 *x, i8 c) { return (i8)_InterlockedXor8((char *)&x->_v, c); }
FORCE_INLINE i8 atomic8_fetch_add(Atomic8 *x, i8 a) { return (i8)_InterlockedExchangeAdd8((char *)&x->_v, a); }
ForceInline i8 atomic8_fetch(Atomic8 *x) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, 0, 0); }
ForceInline i8 atomic8_fetch_set(Atomic8 *x, i8 e) { return (i8)_InterlockedExchange8((char *)&x->_v, e); }
ForceInline i8 atomic8_fetch_test_set(Atomic8 *x, i8 c, i8 e) { return (i8)_InterlockedCompareExchange8((char *)&x->_v, e, c); }
ForceInline i8 atomic8_fetch_xor(Atomic8 *x, i8 c) { return (i8)_InterlockedXor8((char *)&x->_v, c); }
ForceInline i8 atomic8_fetch_add(Atomic8 *x, i8 a) { return (i8)_InterlockedExchangeAdd8((char *)&x->_v, a); }
FORCE_INLINE i16 atomic16_fetch(Atomic16 *x) { return (i16)_InterlockedCompareExchange16(&x->_v, 0, 0); }
FORCE_INLINE i16 atomic16_fetch_set(Atomic16 *x, i16 e) { return (i16)_InterlockedExchange16(&x->_v, e); }
FORCE_INLINE i16 atomic16_fetch_test_set(Atomic16 *x, i16 c, i16 e) { return (i16)_InterlockedCompareExchange16(&x->_v, e, c); }
FORCE_INLINE i16 atomic16_fetch_xor(Atomic16 *x, i16 c) { return (i16)_InterlockedXor16(&x->_v, c); }
FORCE_INLINE i16 atomic16_fetch_add(Atomic16 *x, i16 a) { return (i16)_InterlockedExchangeAdd16(&x->_v, a); }
ForceInline i16 atomic16_fetch(Atomic16 *x) { return (i16)_InterlockedCompareExchange16(&x->_v, 0, 0); }
ForceInline i16 atomic16_fetch_set(Atomic16 *x, i16 e) { return (i16)_InterlockedExchange16(&x->_v, e); }
ForceInline i16 atomic16_fetch_test_set(Atomic16 *x, i16 c, i16 e) { return (i16)_InterlockedCompareExchange16(&x->_v, e, c); }
ForceInline i16 atomic16_fetch_xor(Atomic16 *x, i16 c) { return (i16)_InterlockedXor16(&x->_v, c); }
ForceInline i16 atomic16_fetch_add(Atomic16 *x, i16 a) { return (i16)_InterlockedExchangeAdd16(&x->_v, a); }
FORCE_INLINE i32 atomic32_fetch(Atomic32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
FORCE_INLINE i32 atomic32_fetch_set(Atomic32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); }
FORCE_INLINE i32 atomic32_fetch_test_set(Atomic32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
FORCE_INLINE i32 atomic32_fetch_xor(Atomic32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v, c); }
FORCE_INLINE i32 atomic32_fetch_add(Atomic32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); }
ForceInline i32 atomic32_fetch(Atomic32 *x) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, 0, 0); }
ForceInline i32 atomic32_fetch_set(Atomic32 *x, i32 e) { return (i32)_InterlockedExchange((volatile long *)&x->_v, e); }
ForceInline i32 atomic32_fetch_test_set(Atomic32 *x, i32 c, i32 e) { return (i32)_InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
ForceInline i32 atomic32_fetch_xor(Atomic32 *x, i32 c) { return (i32)_InterlockedXor((volatile long *)&x->_v, c); }
ForceInline i32 atomic32_fetch_add(Atomic32 *x, i32 a) { return (i32)_InterlockedExchangeAdd((volatile long *)&x->_v, a); }
FORCE_INLINE i64 atomic64_fetch(Atomic64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); }
FORCE_INLINE i64 atomic64_fetch_set(Atomic64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); }
FORCE_INLINE i64 atomic64_fetch_test_set(Atomic64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); }
FORCE_INLINE i64 atomic64_fetch_xor(Atomic64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); }
FORCE_INLINE i64 atomic64_fetch_add(Atomic64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); }
ForceInline i64 atomic64_fetch(Atomic64 *x) { return (i64)_InterlockedCompareExchange64(&x->_v, 0, 0); }
ForceInline i64 atomic64_fetch_set(Atomic64 *x, i64 e) { return (i64)_InterlockedExchange64(&x->_v, e); }
ForceInline i64 atomic64_fetch_test_set(Atomic64 *x, i64 c, i64 e) { return (i64)_InterlockedCompareExchange64(&x->_v, e, c); }
ForceInline i64 atomic64_fetch_xor(Atomic64 *x, i64 c) { return (i64)_InterlockedXor64(&x->_v, c); }
ForceInline i64 atomic64_fetch_add(Atomic64 *x, i64 a) { return (i64)_InterlockedExchangeAdd64(&x->_v, a); }
#else
# error Atomics not implemented

View File

@ -6,7 +6,7 @@
BuddyCtx *AllocBuddyCtx(u64 reserve)
{
/* TODO: Determine meta reserve dynamically */
Arena *meta_arena = AllocArena(GIBI(64));
Arena *meta_arena = AllocArena(Gibi(64));
BuddyCtx *ctx = PushStruct(meta_arena, BuddyCtx);
ctx->meta_arena = meta_arena;
ctx->data_arena = AllocArena(reserve);
@ -38,7 +38,7 @@ BuddyBlock *AllocBuddyBlock(BuddyCtx *ctx, u64 size)
if (size > 0x00FFFFFFFFFFFFFFULL)
{
/* TODO: Error */
ASSERT(0);
Assert(0);
}
/* TODO: Minimum block size */
@ -161,7 +161,7 @@ BuddyBlock *GetUnusedBuddyBlock(BuddyCtx *ctx, BuddyLevel *level)
left->parent = parent_block;
left->memory = parent_block->memory;
/* Create right (unused) block from parent block */
/* Create right (UNUSED) block from parent block */
BuddyBlock *right = PushBuddyBlock(ctx);
right->is_used = 0;
right->level = level;
@ -187,7 +187,7 @@ BuddyBlock *GetUnusedBuddyBlock(BuddyCtx *ctx, BuddyLevel *level)
if (level_commit_diff > 0)
{
PushArrayNoZero(arena, u8, level_commit_diff);
ASSERT(arena->pos == (level->size * 2));
Assert(arena->pos == (level->size * 2));
}
/* Create left (used) block from existing child block memory */
@ -196,7 +196,7 @@ BuddyBlock *GetUnusedBuddyBlock(BuddyCtx *ctx, BuddyLevel *level)
left->level = level;
left->memory = ArenaBase(arena);
/* Create right (unused) block from new arena memory */
/* Create right (UNUSED) block from new arena memory */
BuddyBlock *right = PushBuddyBlock(ctx);
right->is_used = 0;
right->level = level;

View File

@ -1,260 +1,265 @@
/* This header is precompiled and automatically included into all source files */
/* NOTE: Include guards disabled since it breaks editor parsing */
//#ifndef COMMON_H
//#define COMMON_H
#ifdef __cplusplus
extern "C" {
#endif
/* ========================== *
* Flag defaults
* ========================== */
////////////////////////////////
//~ Compiler feature flags
/* Compile definition defaults */
#ifndef RTC
# define RTC 0
#ifndef RtcIsEnabled
# define RtcIsEnabled 0
#endif
#ifndef ASAN
# define ASAN 0
#ifndef AsanIsEnabled
# define AsanIsEnabled 0
#endif
#ifndef CRTLIB
# define CRTLIB 0
#ifndef CrtlibIsEnabled
# define CrtlibIsEnabled 0
#endif
#ifndef DEBINFO
# define DEBINFO 0
#ifndef DebinfoEnabled
# define DebinfoEnabled 0
#endif
#ifndef DEVELOPER
# define DEVELOPER 0
#ifndef DeveloperIsEnabled
# define DeveloperIsEnabled 0
#endif
#ifndef PROFILING
# define PROFILING 0
#ifndef ProfilingIsEnabled
# define ProfilingIsEnabled 0
#endif
#ifndef UNOPTIMIZED
# define UNOPTIMIZED 0
#ifndef UnoptimizedIsEnabled
# define UnoptimizedIsEnabled 0
#endif
#ifndef RUN_TESTS
# define RUN_TESTS 0
#ifndef TestsAreEnabled
# define TestsAreEnabled 0
#endif
#ifndef INCBIN_DIR_RAW
# define INCBIN_DIR ""
#ifndef IncbinRawDir
# define IncbinDir ""
#else
# define INCBIN_DIR STRINGIZE(INCBIN_DIR_RAW)
# define IncbinDir Stringize(IncbinRawDir)
#endif
/* ========================== *
* Machine context
* ========================== */
////////////////////////////////
//~ Machine context
/* Compiler */
//- Compiler
#if defined(__clang__)
# define COMPILER_CLANG 1
# define COMPILER_MSVC 0
# define CompilerIsClang 1
# define CompilerIsMsvc 0
#elif defined(_MSC_VER)
# define COMPILER_CLANG 0
# define COMPILER_MSVC 1
# define CompilerIsClang 0
# define CompilerIsMsvc 1
#else
# error Unknown compiler
#endif
/* Operating system */
//- Operating system
#if defined(_WIN32)
# define PLATFORM_WINDOWS 1
# define PLATFORM_MAC 0
# define PLATFORM_LINUX 0
# define PlatformIsWindows 1
# define PlatformIsMac 0
# define PlatformIsLinux 0
#elif defined(__APPLE__) && defined(__MACH__)
# define PLATFORM_WINDOWS 0
# define PLATFORM_MAC 1
# define PLATFORM_LINUX 0
# define PlatformIsWindows 0
# define PlatformIsMac 1
# define PlatformIsLinux 0
#elif defined(__gnu_linux__)
# define PLATFORM_WINDOWS 0
# define PLATFORM_MAC 0
# define PLATFORM_LINUX 1
# define PlatformIsWindows 0
# define PlatformIsMac 0
# define PlatformIsLinux 1
#else
# error Unknown platform
#endif
#if defined(__cplusplus)
# define LANGUAGE_CPP 1
# define LANGUAGE_C 0
# define LanguageIsCpp 1
# define LanguageIsC 0
#else
# define LANGUAGE_CPP 0
# define LANGUAGE_C 1
# define LanguageIsCpp 0
# define LanguageIsC 1
#endif
/* ========================== *
* Debug
* ========================== */
////////////////////////////////
//~ Debug
/* Compile time assert */
#if COMPILER_MSVC || (LANGUAGE_C && __STDC_VERSION__ < 202311L)
# if COMPILER_MSVC
# define STATIC_ASSERT3(cond, line) struct STATIC_ASSERT_____##line {int foo[(cond) ? 1 : -1];}
# define STATIC_ASSERT2(cond, line) STATIC_ASSERT3(cond, line)
# define STATIC_ASSERT(cond) STATIC_ASSERT2(cond, __LINE__)
//- Compile time assert
#if CompilerIsMsvc || (LanguageIsC && __STDC_VERSION__ < 202311L)
# if CompilerIsMsvc
# define StaticAssert2(cond, line) struct STATIC_ASSERT_____##line {int foo[(cond) ? 1 : -1];}
# define StaticAssert1(cond, line) StaticAssert2(cond, line)
# define StaticAssert(cond) StaticAssert1(cond, __LINE__)
# else
# define STATIC_ASSERT(cond) _Static_assert(cond, "")
# define StaticAssert(cond) _Static_assert(cond, "")
# endif
#else
# define STATIC_ASSERT(c) static_assert(c, "")
# define StaticAssert(c) static_assert(c, "")
#endif
#if COMPILER_MSVC
# if DEBINFO
# define DEBUG_ALIAS(var, alias) *(alias) = &(var)
//- Debug alias
#if CompilerIsMsvc
# if DebinfoEnabled
# define DebugAlias(var, alias) *(alias) = &(var)
# else
# define DEBUG_ALIAS(var, alias) *(alias) = &(var)
# define DebugAlias(var, alias) *(alias) = &(var)
# endif
#else
# if DEBINFO
# define DEBUG_ALIAS(var, alias) __attribute((used)) *(alias) = &(var)
# if DebinfoEnabled
# define DebugAlias(var, alias) __attribute((used)) *(alias) = &(var)
# else
# define DEBUG_ALIAS(var, alias) __attribute((unused)) *(alias) = &(var)
# define DebugAlias(var, alias) __attribute((unused)) *(alias) = &(var)
# endif
#endif
#if RTC
# if COMPILER_MSVC
# define ASSERT(cond) ((cond) ? 1 : ((*(volatile int *)0) = 0, 0))
//- Runtime assert
#if RtcIsEnabled
# if CompilerIsMsvc
# define Assert(cond) ((cond) ? 1 : ((*(volatile int *)0) = 0, 0))
# define DEBUGBREAK __debugbreak
# else
# define ASSERT(cond) ((cond) ? 1 : (__builtin_trap(), 0))
# define Assert(cond) ((cond) ? 1 : (__builtin_trap(), 0))
# define DEBUGBREAK __builtin_debugtrap()
# endif
# define DEBUGBREAKABLE { volatile i32 __DEBUGBREAKABLE_VAR = 0; (UNUSED) __DEBUGBREAKABLE_VAR; } (void)0
#else
# define ASSERT(cond) (void)(0)
# define Assert(cond) (void)(0)
#endif
/* Address sanitization */
#if ASAN
//- Address sanitization
#if AsanIsEnabled
void __asan_poison_memory_region(void const volatile *, size_t);
void __asan_unpoison_memory_region(void const volatile *add, size_t);
# define ASAN_POISON(addr, size) __asan_poison_memory_region((addr), (size))
# define ASAN_UNPOISON(addr, size) __asan_unpoison_memory_region((addr), (size))
# define AsanPoison(addr, size) __asan_poison_memory_region((addr), (size))
# define AsanUnpoison(addr, size) __asan_unpoison_memory_region((addr), (size))
#else
# define ASAN_POISON(addr, size)
# define ASAN_UNPOISON(addr, size)
# define AsanPoison(addr, size)
# define AsanUnpoison(addr, size)
#endif
/* ========================== *
* Common macros
* ========================== */
////////////////////////////////
//~ Common utility macros
#if COMPILER_MSVC && LANGUAGE_CPP
# define CPPCOMPAT_INITLIST_TYPE(type)
//- Initlist compatibility
#if CompilerIsMsvc && LanguageIsCpp
# define CppCompatInitListType(type)
#else
# define CPPCOMPAT_INITLIST_TYPE(type) (type)
# define CppCompatInitListType(type) (type)
#endif
/* Zero initialization macro */
#if LANGUAGE_C
//- Zero initialization macro
#if LanguageIsC
# define ZI { 0 }
#else
# define ZI { }
#endif
#define INLINE static inline
//- Inline
#define Inline static inline
#if COMPILER_MSVC
# define FORCE_INLINE inline __forceinline
#if CompilerIsMsvc
# define ForceInline Inline __forceinline
#else
# define FORCE_INLINE inline __attribute((always_inline))
# define ForceInline Inline __attribute((always_inline))
#endif
#if COMPILER_MSVC
# define FORCE_NO_INLINE __declspec(noinline)
#if CompilerIsMsvc
# define ForceNoInline __declspec(noinline)
#else
# define FORCE_NO_INLINE __attribute__((noinline))
# define ForceNoInline __attribute__((noinline))
#endif
/* Separate `static` usage into different keywords for easier grepping */
#define LOCAL_PERSIST static
#define INTERNAL static
#define GLOBAL static
//- Static
#define LocalPersist static
#define internal static
#define Global static
/* Read-only */
#if PLATFORM_WINDOWS
# if COMPILER_MSVC
//- Read-only
#if PlatformIsWindows
# if CompilerIsMsvc
# pragma section(".rdata$", read)
# define READONLY __declspec(allocate(".rdata$"))
# define Readonly __declspec(allocate(".rdata$"))
# else
# define READONLY __declspec(allocate(".rdata"))
# define Readonly __declspec(allocate(".rdata"))
# endif
#elif PLATFORM_MAC
# define READONLY __attribute((section("__TEXT,__const")))
#elif PlatformIsMac
# define Readonly __attribute((section("__TEXT,__const")))
#else
# define READONLY __attribute((section(".rodata")))
# define Readonly __attribute((section(".rodata")))
#endif
/* Markup */
//- Barriers
#if CompilerIsMsvc
# define WriteBarrier() _WriteBarrier()
# define ReadBarrier() _ReadBarrier()
#elif defined(__x86_64) || defined(__i386__)
# define WriteBarrier() __asm__ volatile("" ::: "memory")
# define ReadBarrier() __asm__ volatile("" ::: "memory")
#else
# error Memory barriers not implemented
#endif
//- Markup
#define UNUSED void
#if COMPILER_MSVC
# if LANGUAGE_CPP
#if CompilerIsMsvc
# if LanguageIsCpp
# define FALLTHROUGH [[fallthrough]]
# else
# define FALLTHROUGH
# endif
#elif COMPILER_CLANG
#elif CompilerIsClang
# define FALLTHROUGH __attribute((fallthrough))
#else
# define FALLTHROUGH
#endif
/* Sizes */
#define KIBI(n) (n*1024ULL)
#define MEBI(n) (n*KIBI(1024ULL))
#define GIBI(n) (n*MEBI(1024ULL))
#define TEBI(n) (n*GIBI(1024ULL))
//- Preprocessor concatenation
#define Cat1(a, b) a ## b
#define Cat(a, b) Cat1(a, b)
/* Time */
#define NS_FROM_SECONDS(s) ((i64)((s) * 1000000000.0))
#define SECONDS_FROM_NS(ns) ((f64)(ns) / 1000000000.0)
//- Preprocessor stringization
#define Stringize1(x) #x
#define Stringize(x) Stringize1(x)
/* countof */
#define countof(a) (sizeof(a) / sizeof((a)[0]))
//- Sizes
#define Kibi(n) (n*1024ULL)
#define Mebi(n) (n*Kibi(1024ULL))
#define Gibi(n) (n*Mebi(1024ULL))
#define Tebi(n) (n*Gibi(1024ULL))
/* typeof */
#if COMPILER_MSVC
//- Time
#define NsFromSeconds(s) ((i64)((s) * 1000000000.0))
#define SecondsFromNs(ns) ((f64)(ns) / 1000000000.0)
////////////////////////////////
//~ Type helper macros
//- typeof
#if CompilerIsMsvc
/* Typeof not supported in MSVC */
# define TYPEOF_DEFINED 0
# define typeof(type) ASSERT(0)
# define TypeofIsDefined 0
# define typeof(type) Assert(0)
#else
# define TYPEOF_DEFINED 1
# if LANGUAGE_CPP || (__STDC_VERSION__ < 202311L)
# define TypeofIsDefined 1
# if LanguageIsCpp || (__STDC_VERSION__ < 202311L)
# define typeof(type) __typeof__(type)
# endif
#endif
/* alignof */
#if (COMPILER_MSVC && LANGUAGE_C) || (LANGUAGE_C && (__STDC_VERSION__ < 202311L))
//- alignof
#if (CompilerIsMsvc && LanguageIsC) || (LanguageIsC && (__STDC_VERSION__ < 202311L))
# define alignof(type) __alignof(type)
#endif
/* alignas */
#if (COMPILER_MSVC && LANGUAGE_C) || (LANGUAGE_C && __STDC_VERSION__ < 202311L)
# if COMPILER_MSVC
# define alignas(n) __declspec(align(n))
# else
# define alignas(n) __attribute__((aligned(n)))
# endif
#endif
/* Field macros */
#define FIELD_SIZEOF(type, field) sizeof(((type *)0)->field)
//- sizeof_field
#define sizeof_field(type, field) sizeof(((type *)0)->field)
//- offsetof
#if 0
#if !COMPILER_MSVC
#if !CompilerIsMsvc
# if !defined _CRT_USE_BUILTIN_OFFSETOF
# define offsetof(type, field) ((u64)&(((type *)0)->field))
# else
@ -263,60 +268,67 @@ void __asan_unpoison_memory_region(void const volatile *add, size_t);
#endif
#endif
/* Array */
#define IS_INDEXABLE(a) (sizeof(a[0]))
#define IS_ARRAY(a) (IS_INDEXABLE(a) && (((void *)&a) == ((void *)a)))
////////////////////////////////
//~ Array helper macros
/* Pack */
#if COMPILER_MSVC
# define PACK(s) __pragma(pack(push, 1)) s __pragma(pack(pop))
//- countof
#define countof(a) (sizeof(a) / sizeof((a)[0]))
#define IsIndexable(a) (sizeof(a[0]))
#define IsArray(a) (IsIndexable(a) && (((void *)&a) == ((void *)a)))
////////////////////////////////
//~ Struct alignment / padding macros
//- Pack
#if CompilerIsMsvc
# define Packed(s) __pragma(pack(push, 1)) s __pragma(pack(pop))
#else
# define PACK(s) s __attribute((__packed__))
# define Packed(s) s __attribute((__packed__))
#endif
/* Color */
#define RGB32(r, g, b) RGBA32((r), (g), (b), 0xFF)
#define RGBA32(r, g, b, a) (u32)((u32)(r) | ((u32)(g) << 8) | ((u32)(b) << 16) | ((u32)(a) << 24))
#define BGR32(rgb) ((((rgb >> 0) & 0xFF) << 16) | (((rgb >> 8) & 0xFF) << 8) | (((rgb >> 16) & 0xFF) << 0))
#define _RGB32_U8_FROM_F(fl) ((u8)((fl * 255.0) + 0.5))
#define RGBA32_F(r, g, b, a) RGBA32(_RGB32_U8_FROM_F((r)), _RGB32_U8_FROM_F((g)), _RGB32_U8_FROM_F((b)), _RGB32_U8_FROM_F((a)))
#define RGB32_F(r, g, b) RGBA32_F((r), (g), (b), 1.f)
#define ALPHA32_F(color, a) ((color) & 0x00FFFFFF) | (_RGB32_U8_FROM_F((a)) << 24)
/* Color defines */
#define COLOR_WHITE RGB32(0xFF, 0xFF, 0xFF)
#define COLOR_BLACK RGB32(0x00, 0x00, 0x00)
#define COLOR_RED RGB32(0xFF, 0x00, 0x00)
#define COLOR_GREEN RGB32(0x00, 0xFF, 0x00)
#define COLOR_BLUE RGB32(0x00, 0x00, 0xFF)
#define COLOR_YELLOW RGB32(0xFF, 0xFF, 0x00)
#define COLOR_ORANGE RGB32(0xFF, 0xA5, 0x00)
#define COLOR_PURPLE RGB32(0xFF, 0x00, 0XFF)
/* Barrier */
#if COMPILER_MSVC
# define WRITE_BARRIER() _WriteBarrier()
# define READ_BARRIER() _ReadBarrier()
#elif defined(__x86_64) || defined(__i386__)
# define WRITE_BARRIER() __asm__ volatile("" ::: "memory")
# define READ_BARRIER() __asm__ volatile("" ::: "memory")
#else
# error Memory barriers not implemented
//- alignas
#if (CompilerIsMsvc && LanguageIsC) || (LanguageIsC && __STDC_VERSION__ < 202311L)
# if CompilerIsMsvc
# define alignas(n) __declspec(align(n))
# else
# define alignas(n) __attribute__((aligned(n)))
# endif
#endif
/* Cat */
#define CAT1(a, b) a ## b
#define CAT(a, b) CAT1(a, b)
////////////////////////////////
//~ Color helper macros
/* Stringize */
#define STRINGIZE2(x) #x
#define STRINGIZE(x) STRINGIZE2(x)
//- Rgba 32 bit helpers
#define Rgb32(r, g, b) Rgba32((r), (g), (b), 0xFF)
#define Rgba32(r, g, b, a) (u32)((u32)(r) | ((u32)(g) << 8) | ((u32)(b) << 16) | ((u32)(a) << 24))
#define Bgr32(rgb) ((((rgb >> 0) & 0xFF) << 16) | (((rgb >> 8) & 0xFF) << 8) | (((rgb >> 16) & 0xFF) << 0))
/* ========================== *
* Primitive types
* ========================== */
//- Rgba 32 bit float helpers
#define _Rgb32U8FromF(fl) ((u8)((fl * 255.0) + 0.5))
#define Rgba32F(r, g, b, a) Rgba32(_Rgb32U8FromF((r)), _Rgb32U8FromF((g)), _Rgb32U8FromF((b)), _Rgb32U8FromF((a)))
#define Rgb32F(r, g, b) Rgba32F((r), (g), (b), 1.f)
#define Alpha32F(color, a) ((color) & 0x00FFFFFF) | (_Rgb32U8FromF((a)) << 24)
//- Pre-defined colors
#define ColorWhite Rgb32(0xFF, 0xFF, 0xFF)
#define ColorBlack Rgb32(0x00, 0x00, 0x00)
#define ColorRed Rgb32(0xFF, 0x00, 0x00)
#define ColorGreen Rgb32(0x00, 0xFF, 0x00)
#define ColorBlue Rgb32(0x00, 0x00, 0xFF)
#define ColorYellow Rgb32(0xFF, 0xFF, 0x00)
#define ColorOrange Rgb32(0xFF, 0xA5, 0x00)
#define ColorPurple Rgb32(0xFF, 0x00, 0XFF)
////////////////////////////////
//~ Struct helper macros
#define Struct(name) typedef struct name name; struct name
#define AlignedStruct(name, n) typedef struct name name; struct alignas(n) name
////////////////////////////////
//~ Scalar types
typedef int8_t i8;
typedef int16_t i16;
@ -331,59 +343,39 @@ typedef double f64;
typedef i8 b8;
typedef i32 b32;
#define U8_MAX (0xFF)
#define U16_MAX (0xFFFF)
#define U32_MAX (0xFFFFFFFF)
#define U64_MAX (0xFFFFFFFFFFFFFFFFULL)
#define U8Max (0xFF)
#define U16Max (0xFFFF)
#define U32Max (0xFFFFFFFF)
#define U64Max (0xFFFFFFFFFFFFFFFFULL)
#define I8_MAX (0x7F)
#define I16_MAX (0x7FFF)
#define I32_MAX (0x7FFFFFFF)
#define I64_MAX (0x7FFFFFFFFFFFFFFFLL)
#define I8Max (0x7F)
#define I16Max (0x7FFF)
#define I32Max (0x7FFFFFFF)
#define I64Max (0x7FFFFFFFFFFFFFFFLL)
#define I8_MIN ((i8)-0x80)
#define I16_MIN ((i16)0x8000)
#define I32_MIN ((i32)0x80000000)
#define I64_MIN ((i64)0x8000000000000000LL)
#define I8Min ((i8)-0x80)
#define I16Min ((i16)0x8000)
#define I32Min ((i32)0x80000000)
#define I64Min ((i64)0x8000000000000000LL)
GLOBAL const u32 _f32_infinity_u32 = 0x7f800000;
GLOBAL const f32 *_f32_infinity = (f32 *)&_f32_infinity_u32;
Global const u32 _f32_infinity_u32 = 0x7f800000;
Global const f32 *_f32_infinity = (f32 *)&_f32_infinity_u32;
#define F32_INFINITY (*_f32_infinity)
GLOBAL const u64 _f64_infinity_u64 = 0x7ff0000000000000ULL;
GLOBAL const f64 *_f64_infinity = (f64 *)&_f64_infinity_u64;
Global const u64 _f64_infinity_u64 = 0x7ff0000000000000ULL;
Global const f64 *_f64_infinity = (f64 *)&_f64_infinity_u64;
#define F64_INFINITY (*_f64_infinity)
GLOBAL const u32 _f32_nan_u32 = 0x7f800001;
GLOBAL const f32 *_f32_nan = (f32 *)&_f32_nan_u32;
#define F32_NAN (*_f32_nan)
Global const u32 _f32_nan_u32 = 0x7f800001;
Global const f32 *_f32_nan = (f32 *)&_f32_nan_u32;
#define F32Nan (*_f32_nan)
GLOBAL const u64 _f64_nan_u64 = 0x7ff8000000000001;
GLOBAL const f64 *_f64_nan = (f64 *)&_f64_nan_u64;
#define F64_NAN (*_f64_nan)
Global const u64 _f64_nan_u64 = 0x7ff8000000000001;
Global const f64 *_f64_nan = (f64 *)&_f64_nan_u64;
#define F64Nan (*_f64_nan)
#define F32_IS_NAN(x) (x != x)
#define F64_IS_NAN(x) (x != x)
#define Struct(name) typedef struct name name; struct name
#define AlignedStruct(name, n) typedef struct name name; struct alignas(n) name
/* ========================== *
* Common structs
* ========================== */
Struct(ImageDataRgba)
{
u32 width;
u32 height;
u32 *pixels; /* Array of [width * height] pixels */
};
Struct(PcmData)
{
u64 count;
i16 *samples;
};
#define IsF32Nan(x) (x != x)
#define IsF64Nan(x) (x != x)
#ifdef __cplusplus
}

View File

@ -1,4 +1,4 @@
#if PLATFORM_WINDOWS
#if PlatformIsWindows
#define WIN32_LEAN_AND_MEAN
#define UNICODE

View File

@ -1,4 +1,4 @@
#define MAX_FIBERS 4096
STATIC_ASSERT(MAX_FIBERS < I16_MAX); /* Fiber id should fit max fibers */
StaticAssert(MAX_FIBERS < I16Max); /* Fiber id should fit max fibers */
i16 FiberId(void);

View File

@ -1,4 +1,4 @@
#if COMPILER_MSVC
#if CompilerIsMsvc
/* ========================== *
* Msvc RC file lookup
@ -17,7 +17,7 @@ struct rc_search_params {
};
/* Find first resource with `type` and return the data in `udata`. */
INTERNAL BOOL CALLBACK enum_func(HMODULE module, LPCWSTR type, LPCWSTR wstr_entry_name, LONG_PTR udata)
internal BOOL CALLBACK enum_func(HMODULE module, LPCWSTR type, LPCWSTR wstr_entry_name, LONG_PTR udata)
{
TempArena scratch = BeginScratchNoConflict();
struct rc_search_params *params = (struct rc_search_params *)udata;

View File

@ -1,9 +1,9 @@
#if COMPILER_MSVC
#if CompilerIsMsvc
/* ========================== *
* Msvc RC file incbin
*
* NOTE: Msvc doesn't have an inline assembler that can include binary data.
* NOTE: Msvc doesn't have an inline assembler that can include binary data.
* So instead these macros will trigger a lookup into the embedded RC file for
* entries matched by name (requires the build system to generate and link RC
* file).
@ -35,9 +35,9 @@ String _incbin_get(struct _IncbinRcResource *inc);
#define INCBINSTR2(x) #x
#define INCBINSTR(x) INCBINSTR2(x)
#if PLATFORM_WINDOWS
#if PlatformIsWindows
# define INCBIN_SECTION ".rdata, \"dr\""
#elif PLATFORM_MAC
#elif PlatformIsMac
# define INCBIN_SECTION "__TEXT,__const"
#else
# define INCBIN_SECTION ".rodata"

View File

@ -4,21 +4,21 @@
/* Sqrt */
INLINE f32 ix_sqrt_f32(f32 f)
Inline f32 ix_sqrt_f32(f32 f)
{
__m128 n = _mm_set_ss(f);
n = _mm_sqrt_ss(n);
return _mm_cvtss_f32(n);
}
INLINE f64 ix_sqrt_f64(f64 f)
Inline f64 ix_sqrt_f64(f64 f)
{
__m128d n = _mm_set_sd(f);
n = _mm_sqrt_sd(_mm_setzero_pd(), n);
return _mm_cvtsd_f64(n);
}
INLINE f32 ix_rsqrt_f32(f32 f)
Inline f32 ix_rsqrt_f32(f32 f)
{
__m128 n = _mm_set_ss(f);
n = _mm_rsqrt_ss(n);
@ -27,78 +27,78 @@ INLINE f32 ix_rsqrt_f32(f32 f)
/* Round */
INLINE i32 ix_round_f32_to_i32(f32 f)
Inline i32 ix_round_f32_to_i32(f32 f)
{
return _mm_cvtss_si32(_mm_round_ss(_mm_setzero_ps(), _mm_set_ss(f), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
}
INLINE f32 ix_round_f32_to_f32(f32 f)
Inline f32 ix_round_f32_to_f32(f32 f)
{
return _mm_cvtss_f32(_mm_round_ss(_mm_setzero_ps(), _mm_set_ss(f), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
}
INLINE i64 ix_round_f64_to_i64(f64 f)
Inline i64 ix_round_f64_to_i64(f64 f)
{
return _mm_cvtsd_si64(_mm_round_sd(_mm_setzero_pd(), _mm_set_sd(f), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
}
INLINE f64 ix_round_f64_to_f64(f64 f)
Inline f64 ix_round_f64_to_f64(f64 f)
{
return _mm_cvtsd_f64(_mm_round_sd(_mm_setzero_pd(), _mm_set_sd(f), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
}
/* Floor */
INLINE i32 ix_floor_f32_to_i32(f32 f)
Inline i32 ix_floor_f32_to_i32(f32 f)
{
return _mm_cvtss_si32(_mm_floor_ss(_mm_setzero_ps(), _mm_set_ss(f)));
}
INLINE f32 ix_floor_f32_to_f32(f32 f)
Inline f32 ix_floor_f32_to_f32(f32 f)
{
return _mm_cvtss_f32(_mm_floor_ss(_mm_setzero_ps(), _mm_set_ss(f)));
}
INLINE i64 ix_floor_f64_to_i64(f64 f)
Inline i64 ix_floor_f64_to_i64(f64 f)
{
return _mm_cvtsd_si64(_mm_floor_sd(_mm_setzero_pd(), _mm_set_sd(f)));
}
INLINE f64 ix_floor_f64_to_f64(f64 f)
Inline f64 ix_floor_f64_to_f64(f64 f)
{
return _mm_cvtsd_f64(_mm_floor_sd(_mm_setzero_pd(), _mm_set_sd(f)));
}
/* Ceil */
INLINE i32 ix_ceil_f32_to_i32(f32 f)
Inline i32 ix_ceil_f32_to_i32(f32 f)
{
return _mm_cvtss_si32(_mm_ceil_ss(_mm_setzero_ps(), _mm_set_ss(f)));
}
INLINE f32 ix_ceil_f32_to_f32(f32 f)
Inline f32 ix_ceil_f32_to_f32(f32 f)
{
return _mm_cvtss_f32(_mm_ceil_ss(_mm_setzero_ps(), _mm_set_ss(f)));
}
INLINE i64 ix_ceil_f64_to_i64(f64 f)
Inline i64 ix_ceil_f64_to_i64(f64 f)
{
return _mm_cvtsd_si64(_mm_ceil_sd(_mm_setzero_pd(), _mm_set_sd(f)));
}
INLINE f64 ix_ceil_f64_to_f64(f64 f)
Inline f64 ix_ceil_f64_to_f64(f64 f)
{
return _mm_cvtsd_f64(_mm_ceil_sd(_mm_setzero_pd(), _mm_set_sd(f)));
}
/* Truncate */
INLINE f32 ix_trunc_f32_to_f32(f32 f)
Inline f32 ix_trunc_f32_to_f32(f32 f)
{
return _mm_cvtss_f32(_mm_round_ss(_mm_setzero_ps(), _mm_set_ss(f), _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
}
INLINE f64 ix_trunc_f64_to_f64(f64 f)
Inline f64 ix_trunc_f64_to_f64(f64 f)
{
return _mm_cvtsd_f64(_mm_round_sd(_mm_setzero_pd(), _mm_set_sd(f), _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
}
@ -107,12 +107,12 @@ INLINE f64 ix_trunc_f64_to_f64(f64 f)
* Util
* ========================== */
INLINE void ix_pause(void)
Inline void ix_pause(void)
{
_mm_pause();
}
INLINE u64 ix_clock(void)
Inline u64 ix_clock(void)
{
return __rdtsc();
}

File diff suppressed because it is too large Load Diff

View File

@ -2,7 +2,7 @@
* Memory operations
* ========================== */
#if !CRTLIB
#if !CrtlibIsEnabled
__attribute((section(".text.memcpy")))
void *memcpy(void *__restrict dst, const void *__restrict src, u64 n)
@ -35,13 +35,13 @@ i32 memcmp(const void *p1, const void *p2, u64 n)
return res;
}
#endif /* !CRTLIB */
#endif /* !CrtlibIsEnabled */
/* ========================== *
* Memory allocation
* ========================== */
#if PLATFORM_WINDOWS
#if PlatformIsWindows
#define WIN32_LEAN_AND_MEAN
#define UNICODE

View File

@ -16,7 +16,7 @@
#define MEMSET(ptr, val, count) memset((ptr), (val), (count))
#if CRTLIB
#if CrtlibIsEnabled
# include <memory.h>
#else
void *memcpy(void *__restrict dst, const void *__restrict src, u64 n);

View File

@ -1,7 +1,7 @@
/* TODO: Use a value that gives good precision when dividing into range 0 -> 1 */
#define F64_RAND_MAX U64_MAX
#define F64_RAND_MAX U64Max
#if PLATFORM_WINDOWS
#if PlatformIsWindows
# define BCRYPT_RNG_ALG_HANDLE ((void *)0x00000081)
u32 BCryptGenRandom(void *algorithm, u8 *buffer, u32 buffer_size, u32 flags);
void rand_true(String buffer)

View File

@ -29,7 +29,7 @@ String string_from_char(Arena *arena, char c)
String string_from_uint(Arena *arena, u64 n, u64 base, u64 zfill)
{
/* Base too large */
ASSERT(base <= (countof(INT_CHARS) - 1));
Assert(base <= (countof(INT_CHARS) - 1));
TempArena scratch = BeginScratch(arena);
@ -95,7 +95,7 @@ String string_from_float(Arena *arena, f64 f, u32 precision)
u8 *final_text = PushDry(arena, u8);
u64 final_len = 0;
if (F32_IS_NAN(f)) {
if (IsF32Nan(f)) {
final_len += string_copy(arena, LIT("NaN")).len;
} else if (f == F64_INFINITY) {
final_len += string_copy(arena, LIT("inf")).len;
@ -483,14 +483,14 @@ String string_formatv(Arena *arena, String fmt, va_list args)
case FMT_TYPE_END: {
/* Unexpected end. Not enough FMT args passed to function. */
ASSERT(0);
Assert(0);
parsed_str = string_copy(arena, LIT("<?>"));
no_more_args = 1;
} break;
default: {
/* Unknown format type */
ASSERT(0);
Assert(0);
parsed_str = string_copy(arena, LIT("<?>"));
no_more_args = 1;
} break;
@ -506,11 +506,11 @@ String string_formatv(Arena *arena, String fmt, va_list args)
}
}
#if RTC
#if RtcIsEnabled
if (!no_more_args) {
struct fmt_arg last_arg = va_arg(args, struct fmt_arg);
/* End arg not reached. Too many FMT values passed to function. */
ASSERT(last_arg.type == FMT_TYPE_END);
Assert(last_arg.type == FMT_TYPE_END);
}
#endif

View File

@ -23,16 +23,16 @@ Struct(StringArray) {
* ========================== */
/* Expand C string literal with size for string initialization */
#define LIT(cstr_lit) CPPCOMPAT_INITLIST_TYPE(String) { (sizeof((cstr_lit)) - 1), (u8 *)(cstr_lit) }
#define LIT(cstr_lit) CppCompatInitListType(String) { (sizeof((cstr_lit)) - 1), (u8 *)(cstr_lit) }
/* Same as `STR`, but works with static variable initialization */
#define LIT_NOCAST(cstr_lit) { .len = (sizeof((cstr_lit)) - 1), .text = (u8 *)(cstr_lit) }
#define STRING(size, data) (CPPCOMPAT_INITLIST_TYPE(String) { (size), (data) })
#define STRING(size, data) (CppCompatInitListType(String) { (size), (data) })
#define STRING_FROM_POINTERS(p0, p1) (CPPCOMPAT_INITLIST_TYPE(String) { (u8 *)(p1) - (u8 *)(p0), (u8 *)p0 })
#define STRING_FROM_POINTERS(p0, p1) (CppCompatInitListType(String) { (u8 *)(p1) - (u8 *)(p0), (u8 *)p0 })
#define STRING_FROM_STRUCT(ptr) (CPPCOMPAT_INITLIST_TYPE(String) { sizeof(*(ptr)), (u8 *)(ptr) })
#define STRING_FROM_STRUCT(ptr) (CppCompatInitListType(String) { sizeof(*(ptr)), (u8 *)(ptr) })
#define STRING_FROM_ARENA(arena) (STRING((arena)->pos, ArenaBase(arena)))
@ -40,7 +40,7 @@ Struct(StringArray) {
#define STRING_FROM_ARRAY(a) \
( \
/* Must be array */ \
ASSERT(IS_ARRAY(a)), \
Assert(IsArray(a)), \
((String) { .len = sizeof(a), .text = (u8 *)(a) }) \
)
@ -140,7 +140,7 @@ String string_formatv(Arena *arena, String fmt, va_list args);
struct string_codepoint_iter {
u32 codepoint;
/* Internal */
/* Internal */
String src;
u64 pos;
};

View File

@ -4,9 +4,9 @@ Struct(UID) {
u64 lo;
};
INLINE b32 uid_eq(UID a, UID b) { return a.hi == b.hi && a.lo == b.lo; }
Inline b32 uid_eq(UID a, UID b) { return a.hi == b.hi && a.lo == b.lo; }
INLINE b32 uid_is_zero(UID v) { return v.hi == 0 && v.lo == 0; }
Inline b32 uid_is_zero(UID v) { return v.hi == 0 && v.lo == 0; }
UID uid_true_rand(void);

View File

@ -4,11 +4,11 @@
Utf8DecodeResult uni_decode_utf8(String str)
{
LOCAL_PERSIST const u8 lengths[32] = {
LocalPersist const u8 lengths[32] = {
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,2,2,2,2,3,3,4,5
};
u32 codepoint = U32_MAX;
u32 codepoint = U32Max;
u32 advance = 0;
if (str.len > 0) {
u8 c0 = str.text[0];
@ -109,7 +109,7 @@ Utf8EncodeResult uni_encode_utf8(u32 codepoint)
Utf16DecodeResult uni_decode_utf16(String16 str)
{
u32 codepoint = U32_MAX;
u32 codepoint = U32Max;
u32 advance = 0;
if (str.len >= 1) {
@ -168,7 +168,7 @@ b32 uni_is_utf16_low_surrogate(u16 c)
Utf32DecodeResult uni_decode_utf32(String32 str)
{
u32 codepoint = U32_MAX;
u32 codepoint = U32Max;
u32 advance = 0;
if (str.len >= 1) {

View File

@ -9,7 +9,7 @@
*/
#define HASH_FNV64_BASIS 0xCBF29CE484222325
INLINE u64 hash_fnv64(u64 seed, String s)
Inline u64 hash_fnv64(u64 seed, String s)
{
u64 hash = seed;
for (u64 i = 0; i < s.len; ++i) {
@ -31,7 +31,7 @@ INLINE u64 hash_fnv64(u64 seed, String s)
#define SORT_COMPARE_FUNC_DEF(name, arg_a, arg_b, arg_udata) i32 name(void *arg_a, void *arg_b, void *arg_udata)
typedef SORT_COMPARE_FUNC_DEF(sort_compare_func, a, b, udata);
INLINE void merge_sort_internal(u8 *left, u8 *right, u8 *items, u64 left_count, u64 right_count, u64 item_size, sort_compare_func *callback, void *udata)
Inline void merge_sort_internal(u8 *left, u8 *right, u8 *items, u64 left_count, u64 right_count, u64 item_size, sort_compare_func *callback, void *udata)
{
/* Sort */
u64 i = 0;
@ -66,7 +66,7 @@ INLINE void merge_sort_internal(u8 *left, u8 *right, u8 *items, u64 left_count,
}
}
INLINE void merge_sort(void *items, u64 item_count, u64 item_size, sort_compare_func *callback, void *udata)
Inline void merge_sort(void *items, u64 item_count, u64 item_size, sort_compare_func *callback, void *udata)
{
if (item_count > 1) {
TempArena scratch = BeginScratchNoConflict();
@ -116,7 +116,7 @@ Struct(Dict) {
DictEntry *last;
};
INLINE Dict *dict_init(Arena *arena, u64 bins_count)
Inline Dict *dict_init(Arena *arena, u64 bins_count)
{
__prof;
Dict *dict = PushStruct(arena, Dict);
@ -125,7 +125,7 @@ INLINE Dict *dict_init(Arena *arena, u64 bins_count)
return dict;
}
INLINE void dict_reset(Dict *dict)
Inline void dict_reset(Dict *dict)
{
MEMZERO(dict->bins, sizeof(*dict->bins) * dict->bins_count);
if (dict->first) {
@ -134,7 +134,7 @@ INLINE void dict_reset(Dict *dict)
}
}
INLINE DictEntry *dict_ensure_entry(Arena *arena, Dict *dict, u64 hash)
Inline DictEntry *dict_ensure_entry(Arena *arena, Dict *dict, u64 hash)
{
__prof;
DictBin *bin = &dict->bins[hash % dict->bins_count];
@ -177,14 +177,14 @@ INLINE DictEntry *dict_ensure_entry(Arena *arena, Dict *dict, u64 hash)
return entry;
}
INLINE void dict_set(Arena *arena, Dict *dict, u64 hash, u64 value)
Inline void dict_set(Arena *arena, Dict *dict, u64 hash, u64 value)
{
__prof;
DictEntry *entry = dict_ensure_entry(arena, dict, hash);
entry->value = value;
}
INLINE DictEntry *dict_get_entry(Dict *dict, u64 hash)
Inline DictEntry *dict_get_entry(Dict *dict, u64 hash)
{
__prof;
DictEntry *result = 0;
@ -199,14 +199,14 @@ INLINE DictEntry *dict_get_entry(Dict *dict, u64 hash)
return result;
}
INLINE u64 dict_get(Dict *dict, u64 hash)
Inline u64 dict_get(Dict *dict, u64 hash)
{
__prof;
DictEntry *entry = dict_get_entry(dict, hash);
return entry ? entry->value : 0;
}
INLINE void dict_remove_entry(Dict *dict, DictEntry *entry)
Inline void dict_remove_entry(Dict *dict, DictEntry *entry)
{
/* Remove from bin */
{

View File

@ -115,7 +115,7 @@ b32 BB_CheckWriterOverflowBits(BB_Writer *bw, u64 num_bits)
if (bytes_needed >= max_len)
{
/* Writer overflowed fixed buffer */
ASSERT(0);
Assert(0);
res = 1;
bw->cur_bit = max_len << 3;
bw->overflowed = 1;
@ -145,7 +145,7 @@ void BB_AlignWriter(BB_Writer *bw)
void BB_WriteUBitsNoMagic(BB_Writer *bw, u64 value, u8 num_bits)
{
ASSERT(num_bits > 0 && (num_bits == 64 || value <= ~(U64_MAX << num_bits))); /* Bit count must be able to hold value */
Assert(num_bits > 0 && (num_bits == 64 || value <= ~(U64Max << num_bits))); /* Bit count must be able to hold value */
if (BB_CheckWriterOverflowBits(bw, num_bits))
{
return;
@ -417,7 +417,7 @@ b32 BB_CheckReaderOverflowBits(BB_Reader *br, u64 num_bits)
if (bits_needed > base_len_bits)
{
/* Tried to read past bitbuff memory */
ASSERT(0);
Assert(0);
res = 1;
br->cur_bit = base_len_bits;
br->overflowed = 1;
@ -472,10 +472,10 @@ u64 BB_ReadUBitsNoMagic(BB_Reader *br, u8 num_bits)
u8 num_bytes = (num_bits + 7) >> 3;
u64 tmp = 0;
MEMCPY(&tmp, at, num_bytes);
u64 mask = U64_MAX;
u64 mask = U64Max;
if (num_bits < 64)
{
mask = ~(U64_MAX << num_bits);
mask = ~(U64Max << num_bits);
}
tmp &= mask;
res |= tmp << num_trailing_bits;
@ -491,7 +491,7 @@ u64 BB_ReadUBits(BB_Reader *br, u8 num_bits)
i64 BB_ReadIBits(BB_Reader *br, u8 num_bits)
{
ASSERT(num_bits > 1);
Assert(num_bits > 1);
BB_ReadDebugMagic(br, BB_DebugMagicKind_IBits, num_bits);
u64 tc = BB_ReadUBits(br, num_bits);
return BB_IntFromTwosCompliment(tc, num_bits);
@ -666,7 +666,7 @@ void BB_SeekToByte(BB_Reader *br, u64 pos)
else
{
/* Tried to seek byte backwards in reader */
ASSERT(0);
Assert(0);
br->overflowed = 1;
br->cur_bit = (br->base_len << 3);
}
@ -691,10 +691,10 @@ void BB_ReadDebugMagic(BB_Reader *br, BB_DebugMagicKind expected_magic, u8 expec
u8 stored_num_bits = (stored >> 16) & 0xFF;
/* Verify stored magic match */
ASSERT(expected_magic == stored_magic);
Assert(expected_magic == stored_magic);
/* Verify stored bit count match */
ASSERT(expected_num_bits == stored_num_bits);
Assert(expected_num_bits == stored_num_bits);
}
}
@ -705,7 +705,7 @@ void BB_ReadDebugMarker(BB_Reader *br, String name)
{
u8 c_stored = BB_ReadUBitsNoMagic(br, 8);
u8 c_expected = name.text[i];
ASSERT(c_expected == c_stored);
Assert(c_expected == c_stored);
}
}
@ -716,10 +716,10 @@ void BB_ReadDebugMarker(BB_Reader *br, String name)
u64 BB_TwosComplimentFromUint(u64 value, u8 num_bits)
{
u64 mask = U64_MAX;
u64 mask = U64Max;
if (num_bits < 64)
{
mask = ~(U64_MAX << num_bits);
mask = ~(U64Max << num_bits);
}
u64 tc = (~value & mask) + 1;
tc &= mask;
@ -792,14 +792,14 @@ void BB_Test(void)
{ kind_uv, .uv = { 100 } },
{ kind_uv, .uv = { 10000 } },
{ kind_uv, .uv = { 10000000000000 } },
{ kind_uv, .uv = { U64_MAX } },
{ kind_uv, .uv = { U64Max } },
{ kind_iv, .iv = { 0 } },
{ kind_iv, .iv = { -1 } },
{ kind_iv, .iv = { 10000000000000 } },
{ kind_iv, .iv = { -10000000000000 } },
{ kind_iv, .iv = { I64_MAX } },
{ kind_iv, .iv = { I64_MIN } },
{ kind_iv, .iv = { I64Max } },
{ kind_iv, .iv = { I64Min } },
{ kind_string, .s = { LIT("Hello there! Hope you're doing well.") } },
{ kind_ibits, .ibits = { 3, 3 } },
@ -809,7 +809,7 @@ void BB_Test(void)
String encoded = ZI;
{
BB_Buff bb = AllocBitbuff(GIBI(64));
BB_Buff bb = AllocBitbuff(Gibi(64));
BB_Writer bw = BB_WriterFromBuff(&bb);
for (u64 i = 0; i < countof(cases); ++i)
{
@ -836,7 +836,7 @@ void BB_Test(void)
}
else
{
ASSERT(0);
Assert(0);
}
}
encoded = BB_GetWritten(scratch.arena, &bw);
@ -852,35 +852,35 @@ void BB_Test(void)
{
u64 w = c.ubits.v;
u64 r = BB_ReadUBits(&br, c.ubits.num_bits);
ASSERT(r == w);
Assert(r == w);
}
else if (c.kind == kind_ibits)
{
i64 w = c.ibits.v;
i64 r = BB_ReadIBits(&br, c.ubits.num_bits);
ASSERT(r == w);
Assert(r == w);
}
else if (c.kind == kind_uv)
{
u64 w = c.uv.v;
u64 r = BB_ReadUV(&br);
ASSERT(r == w);
Assert(r == w);
}
else if (c.kind == kind_iv)
{
i64 w = c.iv.v;
i64 r = BB_ReadIV(&br);
ASSERT(r == w);
Assert(r == w);
}
else if (c.kind == kind_string)
{
String w = c.s.v;
String r = BB_ReadString(scratch.arena, &br);
ASSERT(string_eq(r, w));
Assert(string_eq(r, w));
}
else
{
ASSERT(0);
Assert(0);
}
}
}

View File

@ -8,9 +8,9 @@
#define MAX_EPA_ITERATIONS 64
#if COLLIDER_DEBUG
INTERNAL void _dbgbreakable(void)
internal void _dbgbreakable(void)
{
#if RTC
#if RtcIsEnabled
DEBUGBREAKABLE;
#endif
}
@ -26,7 +26,7 @@ INTERNAL void _dbgbreakable(void)
#define DBGSTEP
#endif
INTERNAL CLD_SupportPoint collider_get_support_point_internal(CLD_Shape *shape, Xform xf, V2 dir, i32 ignore)
internal CLD_SupportPoint collider_get_support_point_internal(CLD_Shape *shape, Xform xf, V2 dir, i32 ignore)
{
V2 *points = shape->points;
u32 count = shape->count;
@ -86,7 +86,7 @@ CLD_SupportPoint collider_get_support_point(CLD_Shape *shape, Xform xf, V2 dir)
return collider_get_support_point_internal(shape, xf, dir, -1);
}
INTERNAL CLD_MenkowskiPoint get_menkowski_point(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, V2 dir)
internal CLD_MenkowskiPoint get_menkowski_point(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, V2 dir)
{
CLD_MenkowskiPoint res;
res.s0 = collider_get_support_point(shape0, xf0, dir);
@ -156,9 +156,9 @@ struct gjk_result {
};
#if COLLIDER_DEBUG
INTERNAL struct gjk_result gjk_get_simplex(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, f32 min_unique_pt_dist_sq, u32 dbg_step)
internal struct gjk_result gjk_get_simplex(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, f32 min_unique_pt_dist_sq, u32 dbg_step)
#else
INTERNAL struct gjk_result gjk_get_simplex(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, f32 min_unique_pt_dist_sq)
internal struct gjk_result gjk_get_simplex(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, f32 min_unique_pt_dist_sq)
#endif
{
b32 overlapping = 0;
@ -323,9 +323,9 @@ struct epa_result {
};
#if COLLIDER_DEBUG
INTERNAL struct epa_result epa_get_normal_from_gjk(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, struct gjk_result gjk_res, f32 min_unique_pt_dist_sq, u32 max_iterations, u32 dbg_step)
internal struct epa_result epa_get_normal_from_gjk(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, struct gjk_result gjk_res, f32 min_unique_pt_dist_sq, u32 max_iterations, u32 dbg_step)
#else
INTERNAL struct epa_result epa_get_normal_from_gjk(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, struct gjk_result gjk_res, f32 min_unique_pt_dist_sq, u32 max_iterations)
internal struct epa_result epa_get_normal_from_gjk(CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xform xf1, struct gjk_result gjk_res, f32 min_unique_pt_dist_sq, u32 max_iterations)
#endif
{
TempArena scratch = BeginScratchNoConflict();
@ -339,7 +339,7 @@ INTERNAL struct epa_result epa_get_normal_from_gjk(CLD_Shape *shape0, CLD_Shape
CLD_MenkowskiSimplex s = gjk_res.simplex;
proto = PushDry(scratch.arena, CLD_MenkowskiPoint);
{
ASSERT(s.len == 3);
Assert(s.len == 3);
CLD_MenkowskiPoint *tmp = PushArrayNoZero(scratch.arena, CLD_MenkowskiPoint, 3);
tmp[0] = s.a;
tmp[1] = s.b;
@ -481,7 +481,7 @@ struct clip_line_to_line_result {
V2 a1_clipped, b1_clipped;
};
INTERNAL struct clip_line_to_line_result clip_line_to_line(V2 a0, V2 b0, V2 a1, V2 b1, V2 normal)
internal struct clip_line_to_line_result clip_line_to_line(V2 a0, V2 b0, V2 a1, V2 b1, V2 normal)
{
V2 vab0 = v2_sub(b0, a0);
V2 vab1 = v2_sub(b1, a1);
@ -516,7 +516,7 @@ INTERNAL struct clip_line_to_line_result clip_line_to_line(V2 a0, V2 b0, V2 a1,
return res;
}
INTERNAL V2 clip_point_to_line(V2 a, V2 b, V2 p, V2 normal)
internal V2 clip_point_to_line(V2 a, V2 b, V2 p, V2 normal)
{
V2 vab = v2_sub(b, a);
V2 vap = v2_sub(p, a);
@ -590,7 +590,7 @@ CLD_CollisionResult collider_collision_points(CLD_Shape *shape0, CLD_Shape *shap
}
} else {
/* Project origin to determine if distance is within tolerance. */
ASSERT(f.len == 2);
Assert(f.len == 2);
V2 vab = v2_sub(f.b.p, f.a.p);
V2 vao = v2_neg(f.a.p);
f32 ratio = clamp_f32(v2_dot(vab, vao) / v2_dot(vab, vab), 0, 1);
@ -604,7 +604,7 @@ CLD_CollisionResult collider_collision_points(CLD_Shape *shape0, CLD_Shape *shap
/* Clip to determine final points */
if (colliding) {
/* Max vertices must be < 16 to fit in 4 bit ids */
STATIC_ASSERT(countof(shape0->points) <= 16);
StaticAssert(countof(shape0->points) <= 16);
CLD_MenkowskiFeature f = epa_res.closest_feature;
@ -829,7 +829,7 @@ CLD_ClosestResult collider_closest_points(CLD_Shape *shape0, CLD_Shape *shape1,
p1 = f.a.s1.p;
colliding = gjk_res.overlapping || v2_len_sq(v2_neg(f.a.p)) <= (tolerance * tolerance);
} else {
ASSERT(f.len == 2);
Assert(f.len == 2);
/* FIXME: Winding order dependent? */
f32 ratio;
{
@ -983,7 +983,7 @@ V2Array cloud(Arena *arena, CLD_Shape *shape0, CLD_Shape *shape1, Xform xf0, Xfo
}
/* ========================== *
* Boolean GJK (unused)
* Boolean GJK (UNUSED)
* ========================== */
#if 0

View File

@ -3,14 +3,14 @@
#define WRITE_DIR "power_play"
/* Window title */
#if RTC
# if DEVELOPER
#if RtcIsEnabled
# if DeveloperIsEnabled
# define WINDOW_TITLE "Debug (Developer Build)"
# else
# define WINDOW_TITLE "Debug"
# endif
#else
# if DEVELOPER
# if DeveloperIsEnabled
# define WINDOW_TITLE "Power Play (Developer Build)"
# else
# define WINDOW_TITLE "Power Play"
@ -20,8 +20,8 @@
/* If we are not compiling in developer mode, assume resources are embedded as
* a tar archive in the executable. Otherwise, assume resources are files on
* disk. */
#define RESOURCES_EMBEDDED (!DEVELOPER)
#define RESOURCE_RELOADING (DEVELOPER && !RESOURCES_EMBEDDED)
#define RESOURCES_EMBEDDED (!DeveloperIsEnabled)
#define RESOURCE_RELOADING (DeveloperIsEnabled && !RESOURCES_EMBEDDED)
#define DEFAULT_CAMERA_WIDTH (16)
#define DEFAULT_CAMERA_HEIGHT ((f64)DEFAULT_CAMERA_WIDTH / (16.0 / 9.0))
@ -77,15 +77,15 @@
/* If enabled, bitbuffs will insert/verify magic numbers & length for each read & write */
#define DebugBitbuff 0
#define BITBUFF_TEST RTC
#define BITBUFF_TEST RtcIsEnabled
/* If enabled, things like network writes & memory allocations will be tracked in a global statistics struct */
#define GSTAT_ENABLED 1
#define PROF_THREAD_GROUP_FIBERS -(i64)GIBI(1)
#define PROF_THREAD_GROUP_SCHEDULER -(i64)MEBI(3)
#define PROF_THREAD_GROUP_WINDOW -(i64)MEBI(2)
#define PROF_THREAD_GROUP_MAIN -(i64)MEBI(1)
#define PROF_THREAD_GROUP_FIBERS -(i64)Gibi(1)
#define PROF_THREAD_GROUP_SCHEDULER -(i64)Mebi(3)
#define PROF_THREAD_GROUP_WINDOW -(i64)Mebi(2)
#define PROF_THREAD_GROUP_MAIN -(i64)Mebi(1)
/* ========================== *
* Settings
@ -93,6 +93,6 @@
/* TODO: Move these to user-configurable settings */
#define VSYNC !RTC
#define VSYNC !RtcIsEnabled
#define AUDIO_ENABLED 0
#define FPS_LIMIT 300

View File

@ -1,6 +1,6 @@
GLOBAL struct {
Global struct {
G_Resource *solid_white_texture;
} G = ZI, DEBUG_ALIAS(G, G_draw);
} G = ZI, DebugAlias(G, G_draw);
/* ========================== *
* Startup
@ -98,7 +98,7 @@ void draw_circle(G_RenderSig *sig, V2 pos, f32 radius, u32 color, u32 detail)
void draw_quad(G_RenderSig *sig, Quad quad, u32 color)
{
LOCAL_PERSIST u32 indices_array[6] = {
LocalPersist u32 indices_array[6] = {
0, 1, 2,
0, 2, 3
};
@ -269,7 +269,7 @@ void draw_grid(G_RenderSig *sig, Xform xf, u32 bg0_color, u32 bg1_color, u32 lin
G_RenderCmdDesc cmd = ZI;
cmd.kind = GP_RENDER_CMD_KIND_DRAW_MATERIAL;
cmd.material.xf = xf;
cmd.material.tint = COLOR_WHITE;
cmd.material.tint = ColorWhite;
cmd.material.grid_cmd_id = grid_id;
gp_push_render_cmd(sig, &cmd);
}

View File

@ -6,7 +6,7 @@ D_StartupReceipt draw_startup(F_StartupReceipt *font_sr);
* ========================== */
#define DRAW_MATERIAL_PARAMS(...) ((D_MaterialParams) { \
.tint = COLOR_WHITE, \
.tint = ColorWhite, \
.clip = CLIP_ALL, \
__VA_ARGS__ \
})
@ -67,7 +67,7 @@ void draw_grid(G_RenderSig *sig, Xform xf, u32 bg0_color, u32 bg1_color, u32 lin
* ========================== */
#define DRAW_UI_RECT_PARAMS(...) ((D_UiRectParams) { \
.tint = COLOR_WHITE, \
.tint = ColorWhite, \
.clip = CLIP_ALL, \
__VA_ARGS__ \
})
@ -90,7 +90,7 @@ void draw_ui_rect(G_RenderSig *sig, D_UiRectParams params);
.alignment = DRAW_TEXT_ALIGNMENT_LEFT, \
.offset_x = DRAW_TEXT_OFFSET_X_LEFT, \
.offset_y = DRAW_TEXT_OFFSET_Y_TOP, \
.color = COLOR_WHITE, \
.color = ColorWhite, \
__VA_ARGS__ \
})

View File

@ -3,7 +3,7 @@ extern "C"
#include "dxc.h"
}
#if PLATFORM_WINDOWS
#if PlatformIsWindows
# include "dxc_core_win32.cpp"
#else
# error Dxc core not implemented for this platform

View File

@ -12,11 +12,11 @@ DXC_Result dxc_compile(Arena *arena, String shader_source, i32 num_args, String
#else
#if COMPILER_CLANG
#if CompilerIsClang
# pragma clang diagnostic ignored "-Wlanguage-extension-token"
#endif
#define _ALLOW_RTCc_IN_STL
#define _ALLOW_RtcIsEnabledc_IN_STL
#pragma warning(push, 0)
# define WIN32_LEAN_AND_MEAN

View File

@ -1,5 +1,5 @@
#define LOOKUP_TABLE_SIZE (256)
GLOBAL u32 g_font_codes[] = {
Global u32 g_font_codes[] = {
0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,0x29,0x2A,0x2B,0x2C,0x2D,0x2E,0x2F,0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4A,0x4B,0x4C,0x4D,0x4E,0x4F,0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5A,0x5B,0x5C,0x5D,0x5E,0x5F,0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F,0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x7B,0x7C,0x7D,0x7E,0x7F,0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F,0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F,0xA0,0xA1,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF,0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF,0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF,0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF,0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF,0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF
};
@ -22,9 +22,9 @@ struct font_task_params_store {
* Global state
* ========================== */
GLOBAL struct {
Global struct {
struct font_task_params_store params;
} G = ZI, DEBUG_ALIAS(G, G_font);
} G = ZI, DebugAlias(G, G_font);
/* ========================== *
* Startup
@ -36,7 +36,7 @@ F_StartupReceipt font_startup(AC_StartupReceipt *asset_cache_sr,
__prof;
(UNUSED)asset_cache_sr;
(UNUSED)ttf_sr;
G.params.arena = AllocArena(GIBI(64));
G.params.arena = AllocArena(Gibi(64));
return (F_StartupReceipt) { 0 };
}
@ -44,7 +44,7 @@ F_StartupReceipt font_startup(AC_StartupReceipt *asset_cache_sr,
* Load task param store
* ========================== */
INTERNAL struct font_task_params *font_task_params_alloc(void)
internal struct font_task_params *font_task_params_alloc(void)
{
struct font_task_params *p = 0;
{
@ -60,7 +60,7 @@ INTERNAL struct font_task_params *font_task_params_alloc(void)
return p;
}
INTERNAL void font_task_params_release(struct font_task_params *p)
internal void font_task_params_release(struct font_task_params *p)
{
P_Lock lock = P_LockE(&G.params.mutex);
p->next_free = G.params.head_free;
@ -72,7 +72,7 @@ INTERNAL void font_task_params_release(struct font_task_params *p)
* Load
* ========================== */
INTERNAL P_JobDef(font_load_asset_job, job)
internal P_JobDef(font_load_asset_job, job)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
@ -85,8 +85,8 @@ INTERNAL P_JobDef(font_load_asset_job, job)
P_LogInfoF("Loading font \"%F\" (point size %F)", FMT_STR(path), FMT_FLOAT((f64)point_size));
i64 start_ns = P_TimeNs();
ASSERT(string_ends_with(path, LIT(".ttf")));
ASSERT(countof(g_font_codes) < LOOKUP_TABLE_SIZE);
Assert(string_ends_with(path, LIT(".ttf")));
Assert(countof(g_font_codes) < LOOKUP_TABLE_SIZE);
/* Decode */
R_Resource res = resource_open(path);
@ -100,7 +100,7 @@ INTERNAL P_JobDef(font_load_asset_job, job)
resource_close(&res);
/* Send texture to GPU */
G_Resource *texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2i32FromXY(result.image_data.width, result.image_data.height), result.image_data.pixels);
G_Resource *texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2i32FromXY(result.image_width, result.image_height), result.image_pixels);
/* Allocate store memory */
F_Font *font = 0;
@ -114,8 +114,8 @@ INTERNAL P_JobDef(font_load_asset_job, job)
/* Set font data */
font->texture = texture;
font->image_width = result.image_data.width;
font->image_height = result.image_data.height;
font->image_width = result.image_width;
font->image_height = result.image_height;
font->glyphs_count = result.glyphs_count;
font->point_size = point_size;
@ -127,7 +127,7 @@ INTERNAL P_JobDef(font_load_asset_job, job)
}
/* Copy glyphs from decode result */
STATIC_ASSERT(sizeof(*font->glyphs) == sizeof(*result.glyphs)); /* Font glyph size must match TTF glyph size for memcpy */
StaticAssert(sizeof(*font->glyphs) == sizeof(*result.glyphs)); /* Font glyph size must match TTF glyph size for memcpy */
MEMCPY(font->glyphs, result.glyphs, sizeof(*font->glyphs) * result.glyphs_count);
/* Build lookup table */
@ -138,7 +138,7 @@ INTERNAL P_JobDef(font_load_asset_job, job)
font_task_params_release(params);
P_LogSuccessF("Loaded font \"%F\" (point size %F) in %F seconds", FMT_STR(path), FMT_FLOAT((f64)point_size), FMT_FLOAT(SECONDS_FROM_NS(P_TimeNs() - start_ns)));
P_LogSuccessF("Loaded font \"%F\" (point size %F) in %F seconds", FMT_STR(path), FMT_FLOAT((f64)point_size), FMT_FLOAT(SecondsFromNs(P_TimeNs() - start_ns)));
asset_cache_mark_ready(asset, font);
EndScratch(scratch);

View File

@ -2,7 +2,7 @@
#include "../kernel/kernel.h"
#if PLATFORM_WINDOWS
#if PlatformIsWindows
# include "gp_core_dx12.c"
#else
# error Gp core not implemented for this platform

View File

@ -14,7 +14,7 @@
#pragma comment(lib, "dxguid")
#pragma comment(lib, "d3dcompiler")
#if PROFILING_GPU
#if ProfilingIsEnabled_GPU
/* For RegOpenKeyEx */
# include <winreg.h>
# pragma comment(lib, "advapi32")
@ -30,7 +30,7 @@
#define DX12_NUM_RTV_DESCRIPTORS (1024 * 1)
#define DX12_COMMAND_BUFFER_MIN_SIZE (1024 * 64)
#define DX12_MULTI_QUEUE !PROFILING
#define DX12_MULTI_QUEUE !ProfilingIsEnabled
#if DX12_MULTI_QUEUE
# define DX12_QUEUE_DIRECT 0
# define DX12_QUEUE_COMPUTE 1
@ -45,7 +45,7 @@
# define DX12_NUM_QUEUES 1
#endif
#if RTC
#if RtcIsEnabled
# define DX12_DEBUG 1
# define DX12_SHADER_DEBUG 1
#else
@ -54,7 +54,7 @@
#endif
/* ========================== *
* Internal structs
* internal structs
* ========================== */
struct shader_desc {
@ -130,7 +130,7 @@ struct command_queue {
struct command_list_pool *cl_pool;
#if PROFILING_GPU
#if ProfilingIsEnabled_GPU
__prof_dx12_ctx(prof);
#endif
};
@ -262,49 +262,49 @@ struct fenced_release_data {
};
/* ========================== *
* Internal procs
* internal procs
* ========================== */
INTERNAL P_ExitFuncDef(gp_shutdown);
internal P_ExitFuncDef(gp_shutdown);
INTERNAL void dx12_init_device(void);
internal void dx12_init_device(void);
INTERNAL void dx12_init_objects(void);
internal void dx12_init_objects(void);
INTERNAL void dx12_init_pipelines(void);
internal void dx12_init_pipelines(void);
INTERNAL void dx12_init_noise(void);
internal void dx12_init_noise(void);
INTERNAL struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRIPTOR_HEAP_TYPE type);
internal struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRIPTOR_HEAP_TYPE type);
INTERNAL void command_queue_release(struct command_queue *cq);
internal void command_queue_release(struct command_queue *cq);
INTERNAL P_JobDef(dx12_evictor_job, _);
internal P_JobDef(dx12_evictor_job, _);
INTERNAL void fenced_release(void *data, enum fenced_release_kind kind);
internal void fenced_release(void *data, enum fenced_release_kind kind);
INTERNAL struct dx12_resource *dx12_resource_alloc(D3D12_HEAP_PROPERTIES heap_props, D3D12_HEAP_FLAGS heap_flags, D3D12_RESOURCE_DESC desc, D3D12_RESOURCE_STATES initial_state);
internal struct dx12_resource *dx12_resource_alloc(D3D12_HEAP_PROPERTIES heap_props, D3D12_HEAP_FLAGS heap_flags, D3D12_RESOURCE_DESC desc, D3D12_RESOURCE_STATES initial_state);
INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh);
internal struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh);
struct command_queue_alloc_job_sig { struct command_queue_desc *descs_in; struct command_queue **cqs_out; };
INTERNAL P_JobDef(command_queue_alloc_job, job);
internal P_JobDef(command_queue_alloc_job, job);
struct pipeline_alloc_job_sig { struct pipeline_desc *descs_in; struct pipeline **pipelines_out; };
INTERNAL P_JobDef(pipeline_alloc_job, job);
internal P_JobDef(pipeline_alloc_job, job);
struct dx12_upload_job_sig { struct dx12_resource *resource; void *data; };
INTERNAL P_JobDef(dx12_upload_job, job);
internal P_JobDef(dx12_upload_job, job);
#if RESOURCE_RELOADING
INTERNAL WATCH_CALLBACK_FUNC_DEF(pipeline_watch_callback, name);
internal WATCH_CALLBACK_FUNC_DEF(pipeline_watch_callback, name);
#endif
/* ========================== *
* Global state
* ========================== */
GLOBAL struct {
Global struct {
Atomic32 initialized;
/* Descriptor heaps pool */
@ -373,7 +373,7 @@ GLOBAL struct {
P_Mutex evictor_wake_mutex;
i64 evictor_wake_gen;
b32 evictor_shutdown;
} G = ZI, DEBUG_ALIAS(G, G_gp_dx12);
} G = ZI, DebugAlias(G, G_gp_dx12);
/* ========================== *
* Startup
@ -387,26 +387,26 @@ void gp_startup(void)
}
/* Initialize command descriptor heaps pool */
G.command_descriptor_heaps_arena = AllocArena(GIBI(64));
G.command_descriptor_heaps_arena = AllocArena(Gibi(64));
/* Initialize command buffers pool */
G.command_buffers_arena = AllocArena(GIBI(64));
G.command_buffers_arena = AllocArena(Gibi(64));
G.command_buffers_dict = dict_init(G.command_buffers_arena, 4096);
/* Initialize resources pool */
G.resources_arena = AllocArena(GIBI(64));
G.resources_arena = AllocArena(Gibi(64));
/* Initialize swapchains pool */
G.swapchains_arena = AllocArena(GIBI(64));
G.swapchains_arena = AllocArena(Gibi(64));
/* Initialize pipeline cache */
G.pipelines_arena = AllocArena(GIBI(64));
G.pipelines_arena = AllocArena(Gibi(64));
G.pipeline_descs = dict_init(G.pipelines_arena, 1024);
G.top_pipelines = dict_init(G.pipelines_arena, 1024);
G.top_successful_pipelines = dict_init(G.pipelines_arena, 1024);
/* Initialize fenced releases queue */
G.fenced_releases_arena = AllocArena(GIBI(64));
G.fenced_releases_arena = AllocArena(Gibi(64));
/* Initialize embedded shader archive */
String embedded_data = inc_dxc_tar();
@ -432,7 +432,7 @@ void gp_startup(void)
P_Run(1, dx12_evictor_job, 0, P_Pool_Background, P_Priority_Low, &G.evictor_job_counter);
}
INTERNAL P_ExitFuncDef(gp_shutdown)
internal P_ExitFuncDef(gp_shutdown)
{
__prof;
#if 0
@ -450,7 +450,7 @@ INTERNAL P_ExitFuncDef(gp_shutdown)
{
P_Lock lock = P_LockE(&G.evictor_wake_mutex);
G.evictor_shutdown = 1;
P_SignalCv(&G.evictor_wake_cv, I32_MAX);
P_SignalCv(&G.evictor_wake_cv, I32Max);
P_Unlock(&lock);
}
P_WaitOnCounter(&G.evictor_job_counter);
@ -460,7 +460,7 @@ INTERNAL P_ExitFuncDef(gp_shutdown)
* Dx12 device initialization
* ========================== */
INTERNAL void dx12_init_error(String error)
internal void dx12_init_error(String error)
{
TempArena scratch = BeginScratchNoConflict();
String msg = string_format(scratch.arena, LIT("Failed to initialize DirectX 12.\n\n%F"), FMT_STR(error));
@ -468,7 +468,7 @@ INTERNAL void dx12_init_error(String error)
EndScratch(scratch);
}
INTERNAL void dx12_init_device(void)
internal void dx12_init_device(void)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
@ -585,7 +585,7 @@ INTERNAL void dx12_init_device(void)
}
#endif
#if PROFILING_GPU && PROFILING_GPU_STABLE_POWER_STATE
#if ProfilingIsEnabled_GPU && ProfilingIsEnabled_GPU_STABLE_POWER_STATE
/* Enable stable power state */
{
__profn("Set stable power state");
@ -627,7 +627,7 @@ INTERNAL void dx12_init_device(void)
* Dx12 object initialization
* ========================== */
INTERNAL void dx12_init_objects(void)
internal void dx12_init_objects(void)
{
__prof;
@ -662,7 +662,7 @@ INTERNAL void dx12_init_objects(void)
P_Run(DX12_NUM_QUEUES, command_queue_alloc_job, &sig, P_Pool_Inherit, P_Priority_Inherit, &counter);
P_WaitOnCounter(&counter);
}
#if PROFILING
#if ProfilingIsEnabled
{
/* Initialize serially for consistent order in profiler */
__profn("Initialize command queue profiling contexts");
@ -681,9 +681,9 @@ INTERNAL void dx12_init_objects(void)
* Dx12 pipeline initialization
* ========================== */
INTERNAL void pipeline_register(u64 num_pipelines, struct pipeline **pipelines);
internal void pipeline_register(u64 num_pipelines, struct pipeline **pipelines);
INTERNAL void dx12_init_pipelines(void)
internal void dx12_init_pipelines(void)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
@ -759,7 +759,7 @@ INTERNAL void dx12_init_pipelines(void)
for (u32 i = 0; i < num_pipelines; ++i) {
struct pipeline *pipeline = pipelines[i];
if (pipeline->success) {
P_LogSuccessF("Successfully compiled pipeline \"%F\" in %F seconds", FMT_STR(pipeline->name), FMT_FLOAT(SECONDS_FROM_NS(pipeline->compilation_time_ns)));
P_LogSuccessF("Successfully compiled pipeline \"%F\" in %F seconds", FMT_STR(pipeline->name), FMT_FLOAT(SecondsFromNs(pipeline->compilation_time_ns)));
if (pipeline->error.len) {
String msg = string_format(scratch.arena, LIT("Warning while compiling pipeline \"%F\":\n%F"), FMT_STR(pipeline->name), FMT_STR(pipeline->error));
P_LogWarning(msg);
@ -780,7 +780,7 @@ INTERNAL void dx12_init_pipelines(void)
* Noise texture initialization
* ========================== */
INTERNAL void dx12_init_noise(void)
internal void dx12_init_noise(void)
{
TempArena scratch = BeginScratchNoConflict();
@ -867,7 +867,7 @@ struct shader_compile_job_sig {
struct shader_compile_result *results;
};
INTERNAL P_JobDef(shader_compile_job, job)
internal P_JobDef(shader_compile_job, job)
{
__prof;
struct shader_compile_job_sig *sig = job.sig;
@ -883,7 +883,7 @@ INTERNAL P_JobDef(shader_compile_job, job)
__profn("Compile shader");
P_LogInfoF("Compiling shader \"%F:%F\"", FMT_STR(desc->friendly_name), FMT_STR(desc->entry));
/* NOTE: `DXC_ARGS` is supplied by build system at compile time */
char *dxc_args_cstr = STRINGIZE(DXC_ARGS);
char *dxc_args_cstr = Stringize(DXC_ARGS);
String dxc_args_str = string_from_cstr_no_limit(dxc_args_cstr);
StringArray dxc_args_array = string_split(scratch.arena, dxc_args_str, LIT(" "));
String shader_args[] = {
@ -916,7 +916,7 @@ INTERNAL P_JobDef(shader_compile_job, job)
* Pipeline
* ========================== */
INTERNAL P_JobDef(pipeline_alloc_job, job)
internal P_JobDef(pipeline_alloc_job, job)
{
__prof;
struct pipeline_alloc_job_sig *sig = job.sig;
@ -1104,7 +1104,7 @@ INTERNAL P_JobDef(pipeline_alloc_job, job)
.IndependentBlendEnable = 1
};
for (i32 i = 0; i < (i32)countof(desc->rtvs); ++i) {
STATIC_ASSERT(countof(blend_desc.RenderTarget) <= countof(desc->rtvs));
StaticAssert(countof(blend_desc.RenderTarget) <= countof(desc->rtvs));
if (desc->rtvs[i].format != DXGI_FORMAT_UNKNOWN) {
b32 blending_enabled = desc->rtvs[i].blending;
blend_desc.RenderTarget[i].BlendEnable = blending_enabled;
@ -1140,7 +1140,7 @@ INTERNAL P_JobDef(pipeline_alloc_job, job)
pso_desc.InputLayout = input_layout_desc;
pso_desc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
for (i32 i = 0; i < (i32)countof(desc->rtvs); ++i) {
STATIC_ASSERT(countof(pso_desc.RTVFormats) <= countof(desc->rtvs));
StaticAssert(countof(pso_desc.RTVFormats) <= countof(desc->rtvs));
DXGI_FORMAT format = desc->rtvs[i].format;
if (format != DXGI_FORMAT_UNKNOWN) {
pso_desc.RTVFormats[pso_desc.NumRenderTargets++] = format;
@ -1186,7 +1186,7 @@ INTERNAL P_JobDef(pipeline_alloc_job, job)
EndScratch(scratch);
}
INTERNAL void pipeline_release_now(struct pipeline *pipeline)
internal void pipeline_release_now(struct pipeline *pipeline)
{
__prof;
if (pipeline->pso) {
@ -1204,7 +1204,7 @@ INTERNAL void pipeline_release_now(struct pipeline *pipeline)
* Pipeline cache
* ========================== */
INTERNAL struct pipeline_scope *pipeline_scope_begin(void)
internal struct pipeline_scope *pipeline_scope_begin(void)
{
__prof;
struct pipeline_scope *scope = 0;
@ -1220,7 +1220,7 @@ INTERNAL struct pipeline_scope *pipeline_scope_begin(void)
if (scope) {
arena = scope->arena;
} else {
arena = AllocArena(MEBI(64));
arena = AllocArena(Mebi(64));
}
ResetArena(arena);
scope = PushStruct(arena, struct pipeline_scope);
@ -1229,7 +1229,7 @@ INTERNAL struct pipeline_scope *pipeline_scope_begin(void)
return scope;
}
INTERNAL void pipeline_scope_end(struct pipeline_scope *scope)
internal void pipeline_scope_end(struct pipeline_scope *scope)
{
__prof;
P_Lock lock = P_LockE(&G.pipelines_mutex);
@ -1246,8 +1246,8 @@ INTERNAL void pipeline_scope_end(struct pipeline_scope *scope)
P_Unlock(&lock);
}
INTERNAL READONLY struct pipeline g_nil_pipeline = ZI;
INTERNAL struct pipeline *pipeline_from_name(struct pipeline_scope *scope, String name)
internal Readonly struct pipeline g_nil_pipeline = ZI;
internal struct pipeline *pipeline_from_name(struct pipeline_scope *scope, String name)
{
__prof;
struct pipeline *res = &g_nil_pipeline;
@ -1274,7 +1274,7 @@ INTERNAL struct pipeline *pipeline_from_name(struct pipeline_scope *scope, Strin
return res;
}
INTERNAL void pipeline_register(u64 num_pipelines, struct pipeline **pipelines)
internal void pipeline_register(u64 num_pipelines, struct pipeline **pipelines)
{
__prof;
P_Lock lock = P_LockE(&G.pipelines_mutex);
@ -1306,7 +1306,7 @@ INTERNAL void pipeline_register(u64 num_pipelines, struct pipeline **pipelines)
}
#if RESOURCE_RELOADING
INTERNAL WATCH_CALLBACK_FUNC_DEF(pipeline_watch_callback, name)
internal WATCH_CALLBACK_FUNC_DEF(pipeline_watch_callback, name)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
@ -1384,7 +1384,7 @@ INTERNAL WATCH_CALLBACK_FUNC_DEF(pipeline_watch_callback, name)
struct shader_compile_desc *desc = &shader_descs[i];
struct shader_compile_result *result = &shader_results[i];
if (result->success) {
P_LogSuccessF("Finished compiling shader \"%F:%F\" in %F seconds", FMT_STR(desc->friendly_name), FMT_STR(desc->entry), FMT_FLOAT(SECONDS_FROM_NS(result->elapsed_ns)));
P_LogSuccessF("Finished compiling shader \"%F:%F\" in %F seconds", FMT_STR(desc->friendly_name), FMT_STR(desc->entry), FMT_FLOAT(SecondsFromNs(result->elapsed_ns)));
if (result->errors.len > 0) {
String msg = result->errors;
P_LogWarning(msg);
@ -1432,7 +1432,7 @@ INTERNAL WATCH_CALLBACK_FUNC_DEF(pipeline_watch_callback, name)
for (u32 i = 0; i < num_pipelines; ++i) {
struct pipeline *pipeline = pipelines[i];
if (pipeline->success) {
P_LogSuccessF("Successfully compiled pipeline \"%F\" in %F seconds", FMT_STR(pipeline->name), FMT_FLOAT(SECONDS_FROM_NS(pipeline->compilation_time_ns)));
P_LogSuccessF("Successfully compiled pipeline \"%F\" in %F seconds", FMT_STR(pipeline->name), FMT_FLOAT(SecondsFromNs(pipeline->compilation_time_ns)));
if (pipeline->error.len > 0) {
String msg = string_format(scratch.arena, LIT("Warning while compiling pipeline \"%F\":\n%F"), FMT_STR(pipeline->name), FMT_STR(pipeline->error));
P_LogWarning(msg);
@ -1467,7 +1467,7 @@ INTERNAL WATCH_CALLBACK_FUNC_DEF(pipeline_watch_callback, name)
* Descriptor
* ========================== */
INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
internal struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
{
__prof;
struct descriptor *d = 0;
@ -1497,7 +1497,7 @@ INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
return d;
}
INTERNAL void descriptor_release(struct descriptor *descriptor)
internal void descriptor_release(struct descriptor *descriptor)
{
struct cpu_descriptor_heap *dh = descriptor->heap;
P_Lock lock = P_LockE(&dh->mutex);
@ -1512,12 +1512,12 @@ INTERNAL void descriptor_release(struct descriptor *descriptor)
* CPU descriptor heap
* ========================== */
INTERNAL struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRIPTOR_HEAP_TYPE type)
internal struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRIPTOR_HEAP_TYPE type)
{
__prof;
struct cpu_descriptor_heap *dh = 0;
{
Arena *arena = AllocArena(MEBI(64));
Arena *arena = AllocArena(Mebi(64));
dh = PushStruct(arena, struct cpu_descriptor_heap);
dh->arena = arena;
}
@ -1547,7 +1547,7 @@ INTERNAL struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRI
}
#if 0
INTERNAL void cpu_descriptor_heap_release(struct cpu_descriptor_heap *dh)
internal void cpu_descriptor_heap_release(struct cpu_descriptor_heap *dh)
{
/* TODO */
(UNUSED)dh;
@ -1558,7 +1558,7 @@ INTERNAL void cpu_descriptor_heap_release(struct cpu_descriptor_heap *dh)
* Fenced release
* ========================== */
INTERNAL void fenced_release(void *data, enum fenced_release_kind kind)
internal void fenced_release(void *data, enum fenced_release_kind kind)
{
struct fenced_release_data fr = ZI;
fr.kind = kind;
@ -1591,7 +1591,7 @@ INTERNAL void fenced_release(void *data, enum fenced_release_kind kind)
P_Lock lock = P_LockE(&G.evictor_wake_mutex);
{
++G.evictor_wake_gen;
P_SignalCv(&G.evictor_wake_cv, I32_MAX);
P_SignalCv(&G.evictor_wake_cv, I32Max);
}
P_Unlock(&lock);
}
@ -1601,7 +1601,7 @@ INTERNAL void fenced_release(void *data, enum fenced_release_kind kind)
* Resource
* ========================== */
INTERNAL struct dx12_resource *dx12_resource_alloc(D3D12_HEAP_PROPERTIES heap_props, D3D12_HEAP_FLAGS heap_flags, D3D12_RESOURCE_DESC desc, D3D12_RESOURCE_STATES initial_state)
internal struct dx12_resource *dx12_resource_alloc(D3D12_HEAP_PROPERTIES heap_props, D3D12_HEAP_FLAGS heap_flags, D3D12_RESOURCE_DESC desc, D3D12_RESOURCE_STATES initial_state)
{
__prof;
struct dx12_resource *r = 0;
@ -1634,7 +1634,7 @@ INTERNAL struct dx12_resource *dx12_resource_alloc(D3D12_HEAP_PROPERTIES heap_pr
return r;
}
INTERNAL void dx12_resource_release_now(struct dx12_resource *t)
internal void dx12_resource_release_now(struct dx12_resource *t)
{
__prof;
@ -1679,7 +1679,7 @@ struct dx12_resource_barrier_desc {
enum D3D12_RESOURCE_STATES new_state; /* 0 if type != D3D12_RESOURCE_BARRIER_TYPE_TRANSITION */
};
INTERNAL void dx12_resource_barriers(ID3D12GraphicsCommandList *cl, i32 num_descs, struct dx12_resource_barrier_desc *descs)
internal void dx12_resource_barriers(ID3D12GraphicsCommandList *cl, i32 num_descs, struct dx12_resource_barrier_desc *descs)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
@ -1712,7 +1712,7 @@ INTERNAL void dx12_resource_barriers(ID3D12GraphicsCommandList *cl, i32 num_desc
rb->UAV.pResource = resource->resource;
} else {
/* Unknown barrier type */
ASSERT(0);
Assert(0);
}
}
@ -1727,9 +1727,9 @@ INTERNAL void dx12_resource_barriers(ID3D12GraphicsCommandList *cl, i32 num_desc
* Command queue
* ========================== */
INTERNAL struct command_list_pool *command_list_pool_alloc(struct command_queue *cq);
internal struct command_list_pool *command_list_pool_alloc(struct command_queue *cq);
INTERNAL P_JobDef(command_queue_alloc_job, job)
internal P_JobDef(command_queue_alloc_job, job)
{
__prof;
struct command_queue_alloc_job_sig *sig = job.sig;
@ -1737,7 +1737,7 @@ INTERNAL P_JobDef(command_queue_alloc_job, job)
{
struct command_queue *cq = 0;
{
Arena *arena = AllocArena(GIBI(64));
Arena *arena = AllocArena(Gibi(64));
cq = PushStruct(arena, struct command_queue);
cq->arena = arena;
}
@ -1763,7 +1763,7 @@ INTERNAL P_JobDef(command_queue_alloc_job, job)
}
}
INTERNAL void command_queue_release(struct command_queue *cq)
internal void command_queue_release(struct command_queue *cq)
{
__prof;
/* TODO */
@ -1775,11 +1775,11 @@ INTERNAL void command_queue_release(struct command_queue *cq)
* Command list
* ========================== */
INTERNAL struct command_list_pool *command_list_pool_alloc(struct command_queue *cq)
internal struct command_list_pool *command_list_pool_alloc(struct command_queue *cq)
{
struct command_list_pool *pool = 0;
{
Arena *arena = AllocArena(GIBI(64));
Arena *arena = AllocArena(Gibi(64));
pool = PushStruct(arena, struct command_list_pool);
pool->arena = arena;
}
@ -1787,7 +1787,7 @@ INTERNAL struct command_list_pool *command_list_pool_alloc(struct command_queue
return pool;
}
INTERNAL struct command_list *command_list_open(struct command_list_pool *pool)
internal struct command_list *command_list_open(struct command_list_pool *pool)
{
__prof;
struct command_queue *cq = pool->cq;
@ -1867,7 +1867,7 @@ INTERNAL struct command_list *command_list_open(struct command_list_pool *pool)
}
/* TODO: Allow multiple command list submissions */
INTERNAL u64 command_list_close(struct command_list *cl)
internal u64 command_list_close(struct command_list *cl)
{
__prof;
struct command_queue *cq = cl->cq;
@ -1952,10 +1952,10 @@ INTERNAL u64 command_list_close(struct command_list *cl)
* Command descriptor heap (GPU / shader visible descriptor heap)
* ========================== */
INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struct command_list *cl, struct cpu_descriptor_heap *dh_cpu)
internal struct command_descriptor_heap *command_list_push_descriptor_heap(struct command_list *cl, struct cpu_descriptor_heap *dh_cpu)
{
__prof;
ASSERT(dh_cpu->type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV); /* Src heap must have expected type */
Assert(dh_cpu->type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV); /* Src heap must have expected type */
/* Allocate GPU heap */
struct command_descriptor_heap *cdh = 0;
@ -2033,13 +2033,13 @@ INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struc
* Command buffer
* ========================== */
INTERNAL u64 command_buffer_hash_from_size(u64 size)
internal u64 command_buffer_hash_from_size(u64 size)
{
u64 hash = rand_u64_from_seed(size);
return hash;
}
INTERNAL u64 align_up_pow2(u64 v)
internal u64 align_up_pow2(u64 v)
{
u64 res = 0;
if (v > 0) {
@ -2056,12 +2056,12 @@ INTERNAL u64 align_up_pow2(u64 v)
}
#define command_list_push_buffer(cl, count, elems) _command_list_push_buffer((cl), count * ((elems) ? sizeof(*(elems)) : 0), (elems), (elems) ? sizeof(*(elems)) : 1)
INTERNAL struct command_buffer *_command_list_push_buffer(struct command_list *cl, u64 data_len, void *data, u64 data_stride)
internal struct command_buffer *_command_list_push_buffer(struct command_list *cl, u64 data_len, void *data, u64 data_stride)
{
__prof;
/* Data length should be a multiple of stride */
ASSERT(data_len % data_stride == 0);
Assert(data_len % data_stride == 0);
/* Determine size */
u64 size = max_u64(DX12_COMMAND_BUFFER_MIN_SIZE, align_up_pow2(data_len));
@ -2185,7 +2185,7 @@ struct dx12_wait_fence_job_sig {
u64 target;
};
INTERNAL P_JobDef(dx12_wait_fence_job, job)
internal P_JobDef(dx12_wait_fence_job, job)
{
__prof;
struct dx12_wait_fence_job_sig *sig = job.sig;
@ -2210,7 +2210,7 @@ G_Resource *gp_texture_alloc(G_TextureFormat format, u32 flags, V2i32 size, void
if (size.x <= 0 || size.y <= 0) {
P_Panic(LIT("Tried to create texture with dimension <= 0"));
}
LOCAL_PERSIST const DXGI_FORMAT formats[] = {
LocalPersist const DXGI_FORMAT formats[] = {
[GP_TEXTURE_FORMAT_R8_UNORM] = DXGI_FORMAT_R8_UNORM,
[GP_TEXTURE_FORMAT_R8G8B8A8_UNORM] = DXGI_FORMAT_R8G8B8A8_UNORM,
[GP_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB] = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
@ -2281,13 +2281,13 @@ V2i32 gp_texture_get_size(G_Resource *resource)
* Upload
* ========================== */
INTERNAL P_JobDef(dx12_upload_job, job)
internal P_JobDef(dx12_upload_job, job)
{
struct dx12_upload_job_sig *sig = job.sig;
struct dx12_resource *r = sig->resource;
void *data = sig->data;
ASSERT(r->state == D3D12_RESOURCE_STATE_COPY_DEST);
Assert(r->state == D3D12_RESOURCE_STATE_COPY_DEST);
D3D12_RESOURCE_DESC desc = ZI;
ID3D12Resource_GetDesc(r->resource, &desc);
@ -2353,7 +2353,7 @@ INTERNAL P_JobDef(dx12_upload_job, job)
/* Copy from upload heap to texture */
{
__profnc_dx12(cl->cq->prof, cl->cl, "Upload texture", RGB32_F(0.2, 0.5, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Upload texture", Rgb32F(0.2, 0.5, 0.2));
D3D12_TEXTURE_COPY_LOCATION dst_loc = {
.pResource = r->resource,
.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX,
@ -2389,7 +2389,7 @@ INTERNAL P_JobDef(dx12_upload_job, job)
* Run utils
* ========================== */
INTERNAL void command_list_set_pipeline(struct command_list *cl, struct pipeline *pipeline)
internal void command_list_set_pipeline(struct command_list *cl, struct pipeline *pipeline)
{
ID3D12GraphicsCommandList_SetPipelineState(cl->cl, pipeline->pso);
if (pipeline->is_gfx) {
@ -2400,11 +2400,11 @@ INTERNAL void command_list_set_pipeline(struct command_list *cl, struct pipeline
cl->cur_pipeline = pipeline;
}
INTERNAL void command_list_set_sig(struct command_list *cl, void *src, u32 size)
internal void command_list_set_sig(struct command_list *cl, void *src, u32 size)
{
__prof;
ASSERT(size % 16 == 0); /* Root constant structs must pad to 16 bytes */
ASSERT(size <= 256); /* Only 64 32-bit root constants allowed in signature */
Assert(size % 16 == 0); /* Root constant structs must pad to 16 bytes */
Assert(size <= 256); /* Only 64 32-bit root constants allowed in signature */
u32 num32bit = size / 4;
b32 is_gfx = cl->cur_pipeline->is_gfx;
for (u32 i = 0; i < num32bit; ++i) {
@ -2418,7 +2418,7 @@ INTERNAL void command_list_set_sig(struct command_list *cl, void *src, u32 size)
}
}
INTERNAL struct D3D12_VIEWPORT viewport_from_rect(Rect r)
internal struct D3D12_VIEWPORT viewport_from_rect(Rect r)
{
struct D3D12_VIEWPORT viewport = ZI;
viewport.TopLeftX = r.x;
@ -2430,7 +2430,7 @@ INTERNAL struct D3D12_VIEWPORT viewport_from_rect(Rect r)
return viewport;
}
INTERNAL D3D12_RECT scissor_from_rect(Rect r)
internal D3D12_RECT scissor_from_rect(Rect r)
{
D3D12_RECT scissor = ZI;
scissor.left = r.x;
@ -2440,7 +2440,7 @@ INTERNAL D3D12_RECT scissor_from_rect(Rect r)
return scissor;
}
INTERNAL D3D12_VERTEX_BUFFER_VIEW vbv_from_command_buffer(struct command_buffer *cb, u32 vertex_size)
internal D3D12_VERTEX_BUFFER_VIEW vbv_from_command_buffer(struct command_buffer *cb, u32 vertex_size)
{
D3D12_VERTEX_BUFFER_VIEW vbv = ZI;
vbv.BufferLocation = cb->resource->gpu_address;
@ -2449,7 +2449,7 @@ INTERNAL D3D12_VERTEX_BUFFER_VIEW vbv_from_command_buffer(struct command_buffer
return vbv;
}
INTERNAL D3D12_INDEX_BUFFER_VIEW ibv_from_command_buffer(struct command_buffer *cb, DXGI_FORMAT format)
internal D3D12_INDEX_BUFFER_VIEW ibv_from_command_buffer(struct command_buffer *cb, DXGI_FORMAT format)
{
D3D12_INDEX_BUFFER_VIEW ibv = ZI;
ibv.BufferLocation = cb->resource->gpu_address;
@ -2458,7 +2458,7 @@ INTERNAL D3D12_INDEX_BUFFER_VIEW ibv_from_command_buffer(struct command_buffer *
return ibv;
}
INTERNAL struct dx12_resource *gbuff_alloc(DXGI_FORMAT format, V2i32 size, D3D12_RESOURCE_STATES initial_state)
internal struct dx12_resource *gbuff_alloc(DXGI_FORMAT format, V2i32 size, D3D12_RESOURCE_STATES initial_state)
{
__prof;
D3D12_HEAP_PROPERTIES heap_props = { .Type = D3D12_HEAP_TYPE_DEFAULT };
@ -2493,14 +2493,14 @@ INTERNAL struct dx12_resource *gbuff_alloc(DXGI_FORMAT format, V2i32 size, D3D12
}
/* Calculate the view projection matrix */
INLINE Mat4x4 calculate_vp(Xform view, f32 viewport_width, f32 viewport_height)
Inline Mat4x4 calculate_vp(Xform view, f32 viewport_width, f32 viewport_height)
{
Mat4x4 projection = mat4x4_from_ortho(0.0, viewport_width, viewport_height, 0.0, -1.0, 1.0);
Mat4x4 view4x4 = mat4x4_from_xform(view);
return mat4x4_mul(projection, view4x4);
}
INTERNAL D3D12_GPU_DESCRIPTOR_HANDLE gpu_handle_from_descriptor(struct descriptor *descriptor, struct command_descriptor_heap *cdh)
internal D3D12_GPU_DESCRIPTOR_HANDLE gpu_handle_from_descriptor(struct descriptor *descriptor, struct command_descriptor_heap *cdh)
{
struct D3D12_GPU_DESCRIPTOR_HANDLE res = ZI;
res.ptr = cdh->start_gpu_handle.ptr + descriptor->index * G.desc_sizes[descriptor->heap->type];
@ -2570,26 +2570,26 @@ struct material_grid_desc {
u32 y_color;
};
INTERNAL struct render_sig *render_sig_alloc(void)
internal struct render_sig *render_sig_alloc(void)
{
__prof;
struct render_sig *sig = 0;
{
Arena *arena = AllocArena(MEBI(64));
Arena *arena = AllocArena(Mebi(64));
sig = PushStruct(arena, struct render_sig);
sig->arena = arena;
}
sig->material_instance_descs_arena = AllocArena(GIBI(1));
sig->material_grid_descs_arena = AllocArena(GIBI(1));
sig->ui_rect_instance_descs_arena = AllocArena(GIBI(1));
sig->ui_shape_verts_arena = AllocArena(GIBI(1));
sig->ui_shape_indices_arena = AllocArena(GIBI(1));
sig->material_instance_descs_arena = AllocArena(Gibi(1));
sig->material_grid_descs_arena = AllocArena(Gibi(1));
sig->ui_rect_instance_descs_arena = AllocArena(Gibi(1));
sig->ui_shape_verts_arena = AllocArena(Gibi(1));
sig->ui_shape_indices_arena = AllocArena(Gibi(1));
return sig;
}
INTERNAL void render_sig_reset(struct render_sig *sig)
internal void render_sig_reset(struct render_sig *sig)
{
__prof;
@ -2745,7 +2745,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
struct command_list *cl = command_list_open(cq->cl_pool);
{
__profn("Run render");
__profnc_dx12(cl->cq->prof, cl->cl, "Run render", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Run render", Rgb32F(0.5, 0.2, 0.2));
Mat4x4 world_to_render_vp_matrix = calculate_vp(world_to_render_xf, render_viewport.width, render_viewport.height);
Mat4x4 ui_vp_matrix = calculate_vp(XFORM_IDENT, ui_viewport.width, ui_viewport.height);
Mat4x4 blit_vp_matrix = ZI;
@ -2759,7 +2759,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* Upload dummmy vert & index buffer */
/* TODO: Make these static */
/* Dummy vertex buffer */
LOCAL_PERSIST u16 quad_indices[6] = { 0, 1, 2, 0, 2, 3 };
LocalPersist u16 quad_indices[6] = { 0, 1, 2, 0, 2, 3 };
struct command_buffer *dummy_vertex_buffer = command_list_push_buffer(cl, 0, (u8 *)0);
struct command_buffer *quad_index_buffer = command_list_push_buffer(cl, countof(quad_indices), quad_indices);
@ -2851,7 +2851,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* Clear */
{
__profn("Clear gbuffers");
__profnc_dx12(cl->cq->prof, cl->cl, "Clear gbuffers", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Clear gbuffers", Rgb32F(0.5, 0.2, 0.2));
f32 clear_color[] = { 0.0f, 0.0f, 0.0f, 0.0f };
ID3D12GraphicsCommandList_ClearRenderTargetView(cl->cl, rsig->albedo->rtv_descriptor->handle, clear_color, 0, 0);
ID3D12GraphicsCommandList_ClearRenderTargetView(cl->cl, rsig->emittance->rtv_descriptor->handle, clear_color, 0, 0);
@ -2861,7 +2861,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* Material pass */
if (material_pipeline->success) {
__profn("Material pass");
__profnc_dx12(cl->cq->prof, cl->cl, "Material pass", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Material pass", Rgb32F(0.5, 0.2, 0.2));
/* Bind pipeline */
command_list_set_pipeline(cl, material_pipeline);
@ -2908,7 +2908,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* Flood pass */
if (flood_pipeline->success && !params.effects_disabled) {
__profn("Flood pass");
__profnc_dx12(cl->cq->prof, cl->cl, "Flood pass", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Flood pass", Rgb32F(0.5, 0.2, 0.2));
/* Bind pipeline */
command_list_set_pipeline(cl, flood_pipeline);
@ -2920,7 +2920,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
u64 step = 0;
while (step_length != 0 && step < max_steps) {
__profn("Flood step");
__profnc_dx12(cl->cq->prof, cl->cl, "Flood step", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Flood step", Rgb32F(0.5, 0.2, 0.2));
/* UAV barrier */
{
@ -2979,7 +2979,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* Clear */
{
__profn("Clear shade target");
__profnc_dx12(cl->cq->prof, cl->cl, "Clear shade target", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Clear shade target", Rgb32F(0.5, 0.2, 0.2));
f32 clear_color[] = { 0.0f, 0.0f, 0.0f, 0.0f };
ID3D12GraphicsCommandList_ClearUnorderedAccessViewFloat(cl->cl, gpu_handle_from_descriptor(rsig->shade_target->uav_descriptor, descriptor_heap), rsig->shade_target->uav_descriptor->handle, rsig->shade_target->resource, clear_color, 0, 0);
}
@ -2988,7 +2988,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* Shade pass */
if (shade_pipeline->success) {
__profn("Shade pass");
__profnc_dx12(cl->cq->prof, cl->cl, "Shade pass", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Shade pass", Rgb32F(0.5, 0.2, 0.2));
/* Bind pipeline */
command_list_set_pipeline(cl, shade_pipeline);
@ -3040,7 +3040,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* Clear */
{
__profn("Clear ui target");
__profnc_dx12(cl->cq->prof, cl->cl, "Clear ui target", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Clear ui target", Rgb32F(0.5, 0.2, 0.2));
f32 clear_color[] = { 0.0f, 0.0f, 0.0f, 0.0f };
ID3D12GraphicsCommandList_ClearRenderTargetView(cl->cl, rsig->ui_target->rtv_descriptor->handle, clear_color, 0, 0);
}
@ -3049,7 +3049,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* UI blit pass */
if (blit_pipeline->success) {
__profn("UI blit pass");
__profnc_dx12(cl->cq->prof, cl->cl, "UI blit pass", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "UI blit pass", Rgb32F(0.5, 0.2, 0.2));
/* Bind pipeline */
command_list_set_pipeline(cl, blit_pipeline);
@ -3081,7 +3081,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* UI rect pass */
if (ui_pipeline->success) {
__profn("UI rect pass");
__profnc_dx12(cl->cq->prof, cl->cl, "UI rect pass", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "UI rect pass", Rgb32F(0.5, 0.2, 0.2));
/* Bind pipeline */
command_list_set_pipeline(cl, ui_pipeline);
@ -3111,7 +3111,7 @@ G_Resource *gp_run_render(G_RenderSig *gp_render_sig, G_RenderParams params)
/* UI shape pass */
if (shape_pipeline->success) {
__profn("UI shape pass");
__profnc_dx12(cl->cq->prof, cl->cl, "UI shape pass", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "UI shape pass", Rgb32F(0.5, 0.2, 0.2));
/* Bind pipeline */
command_list_set_pipeline(cl, shape_pipeline);
@ -3182,7 +3182,7 @@ G_MemoryInfo gp_query_memory_info(void)
* Swapchain
* ========================== */
INTERNAL void swapchain_init_resources(struct swapchain *swapchain)
internal void swapchain_init_resources(struct swapchain *swapchain)
{
for (u32 i = 0; i < countof(swapchain->buffers); ++i) {
ID3D12Resource *resource = 0;
@ -3250,7 +3250,7 @@ G_Swapchain *gp_swapchain_alloc(P_Window *window, V2i32 resolution)
#if DX12_WAIT_FRAME_LATENCY > 0
IDXGISwapChain3_SetMaximumFrameLatency(swapchain->swapchain, DX12_WAIT_FRAME_LATENCY);
swapchain->waitable = IDXGISwapChain2_GetFrameLatencyWaitableObject(swapchain->swapchain);
ASSERT(swapchain->waitable);
Assert(swapchain->waitable);
#endif
/* Disable Alt+Enter changing monitor resolution to match window size */
@ -3282,7 +3282,7 @@ void gp_swapchain_wait(G_Swapchain *gp_swapchain)
#endif
}
INTERNAL struct swapchain_buffer *update_swapchain(struct swapchain *swapchain, V2i32 resolution)
internal struct swapchain_buffer *update_swapchain(struct swapchain *swapchain, V2i32 resolution)
{
__prof;
resolution.x = max_i32(resolution.x, 1);
@ -3335,7 +3335,7 @@ INTERNAL struct swapchain_buffer *update_swapchain(struct swapchain *swapchain,
* Present
* ========================== */
INTERNAL void present_blit(struct swapchain_buffer *dst, struct dx12_resource *src, Xform src_xf)
internal void present_blit(struct swapchain_buffer *dst, struct dx12_resource *src, Xform src_xf)
{
__prof;
struct pipeline_scope *pipeline_scope = pipeline_scope_begin();
@ -3345,13 +3345,13 @@ INTERNAL void present_blit(struct swapchain_buffer *dst, struct dx12_resource *s
struct command_list *cl = command_list_open(cq->cl_pool);
{
__profn("Present blit");
__profnc_dx12(cl->cq->prof, cl->cl, "Present blit", RGB32_F(0.5, 0.2, 0.2));
__profnc_dx12(cl->cq->prof, cl->cl, "Present blit", Rgb32F(0.5, 0.2, 0.2));
struct swapchain *swapchain = dst->swapchain;
/* Upload dummmy vert & index buffer */
/* TODO: Make these static */
/* Dummy vertex buffer */
LOCAL_PERSIST u16 quad_indices[6] = { 0, 1, 2, 0, 2, 3 };
LocalPersist u16 quad_indices[6] = { 0, 1, 2, 0, 2, 3 };
struct command_buffer *dummy_vertex_buffer = command_list_push_buffer(cl, 0, (u8 *)0);
struct command_buffer *quad_index_buffer = command_list_push_buffer(cl, countof(quad_indices), quad_indices);
@ -3454,11 +3454,11 @@ void gp_present(G_Swapchain *gp_swapchain, V2i32 backbuffer_resolution, G_Resour
__profn("Present");
HRESULT hr = IDXGISwapChain3_Present(swapchain->swapchain, vsync, present_flags);
if (!SUCCEEDED(hr)) {
ASSERT(0);
Assert(0);
}
}
#if PROFILING_GPU
#if ProfilingIsEnabled_GPU
{
__profframe(0);
@ -3487,7 +3487,7 @@ void gp_present(G_Swapchain *gp_swapchain, V2i32 backbuffer_resolution, G_Resour
* Evictor thread
* ========================== */
INTERNAL P_JobDef(dx12_evictor_job, _)
internal P_JobDef(dx12_evictor_job, _)
{
(UNUSED)_;
@ -3545,7 +3545,7 @@ INTERNAL P_JobDef(dx12_evictor_job, _)
default:
{
/* Unknown handle type */
ASSERT(0);
Assert(0);
} break;
case FENCED_RELEASE_KIND_RESOURCE:

View File

@ -4,7 +4,7 @@
* changes to an embedded file. */
#if RESOURCES_EMBEDDED
INCBIN_INCLUDE(res_tar, INCBIN_DIR "res.tar");
INCBIN_INCLUDE(res_tar, IncbinDir "res.tar");
String inc_res_tar(void)
{
return INCBIN_GET(res_tar);
@ -12,7 +12,7 @@ String inc_res_tar(void)
#endif
INCBIN_INCLUDE(dxc_tar, INCBIN_DIR "dxc.tar");
INCBIN_INCLUDE(dxc_tar, IncbinDir "dxc.tar");
String inc_dxc_tar(void)
{
return INCBIN_GET(dxc_tar);

View File

@ -63,19 +63,19 @@ enum lex_number_state {
LEX_NUMBER_STATE_EXPONENT
};
GLOBAL READONLY String g_keyword_strings[] = {
Global Readonly String g_keyword_strings[] = {
['t'] = LIT_NOCAST("true"),
['f'] = LIT_NOCAST("false"),
['n'] = LIT_NOCAST("null")
};
GLOBAL READONLY enum token_type g_keyword_types[] = {
Global Readonly enum token_type g_keyword_types[] = {
['t'] = TOKEN_TYPE_KEYWORD_TRUE,
['f'] = TOKEN_TYPE_KEYWORD_FALSE,
['n'] = TOKEN_TYPE_KEYWORD_NULL
};
INTERNAL struct token *push_token(Arena *arena, struct token_list *list)
internal struct token *push_token(Arena *arena, struct token_list *list)
{
struct token *t = PushStruct(arena, struct token);
if (!list->token_first) {
@ -87,7 +87,7 @@ INTERNAL struct token *push_token(Arena *arena, struct token_list *list)
return t;
}
INTERNAL struct token_list lex(Arena *arena, String src)
internal struct token_list lex(Arena *arena, String src)
{
struct token_list res = ZI;
@ -354,13 +354,13 @@ INTERNAL struct token_list lex(Arena *arena, String src)
* Interpret
* ========================== */
INTERNAL void append_char(Arena *arena, String *str, u8 c)
internal void append_char(Arena *arena, String *str, u8 c)
{
*PushStructNoZero(arena, u8) = c;
++str->len;
}
INTERNAL f64 interpret_number(String src)
internal f64 interpret_number(String src)
{
b32 whole_present = 0;
u64 whole_left = 0;
@ -443,7 +443,7 @@ INTERNAL f64 interpret_number(String src)
default: {
/* Unreachable */
ASSERT(0);
Assert(0);
++pos;
} break;
}
@ -458,7 +458,7 @@ INTERNAL f64 interpret_number(String src)
default: {
/* Unreachable */
ASSERT(0);
Assert(0);
++pos;
} break;
}
@ -466,7 +466,7 @@ INTERNAL f64 interpret_number(String src)
default: {
/* Unreachable */
ASSERT(0);
Assert(0);
++pos;
} break;
}
@ -522,7 +522,7 @@ INTERNAL f64 interpret_number(String src)
return res;
}
INTERNAL String interpret_string(Arena *arena, String src, String *error)
internal String interpret_string(Arena *arena, String src, String *error)
{
String res = {
.len = 0,
@ -642,7 +642,7 @@ struct parser {
JSON_ErrorList errors;
};
INTERNAL void push_error(Arena *arena, struct parser *p, struct token *t, String msg)
internal void push_error(Arena *arena, struct parser *p, struct token *t, String msg)
{
JSON_Error *error = PushStruct(arena, JSON_Error);
error->msg = msg;
@ -659,7 +659,7 @@ INTERNAL void push_error(Arena *arena, struct parser *p, struct token *t, String
++list->count;
}
INTERNAL void parse(Arena *arena, struct parser *p)
internal void parse(Arena *arena, struct parser *p)
{
TempArena scratch = BeginScratch(arena);

View File

@ -1,5 +1,5 @@
/* Determine if file was included from C or from HLSL */
#if defined(LANGUAGE_C) || defined(LANGUAGE_CPP)
#if defined(LanguageIsC) || defined(LanguageIsCpp)
# define K_IS_CPU 1
#else
# define K_IS_CPU 0
@ -7,82 +7,82 @@
#if K_IS_CPU
#define K_STRUCT(s) PACK(struct s)
#define K_DECL(t, n) struct CAT(K_, t) n
#define K_STRUCT(s) Packed(struct s)
#define K_DECL(t, n) struct Cat(K_, t) n
#define K_DECLS(t, n) K_DECL(t, n)
#define K_STATIC_ASSERT(c) STATIC_ASSERT(c)
#define K_STATIC_ASSERT(c) StaticAssert(c)
typedef struct K_uint K_uint;
struct K_uint { u32 v; };
INLINE struct K_uint K_UintFromU32(u32 v)
Inline struct K_uint K_UintFromU32(u32 v)
{
return (struct K_uint) { .v = v };
}
typedef struct K_int K_int;
struct K_int { i32 v; };
INLINE struct K_int K_IntFromI32(i32 v)
Inline struct K_int K_IntFromI32(i32 v)
{
return (struct K_int) { .v = v };
}
typedef struct K_uint2 K_uint2;
struct K_uint2 { u32 v[2]; };
INLINE struct K_uint2 K_Uint2FromU32(u32 x, u32 y)
Inline struct K_uint2 K_Uint2FromU32(u32 x, u32 y)
{
return (struct K_uint2) { .v[0] = x, .v[1] = y };
}
typedef struct K_uint3 K_uint3;
struct K_uint3 { u32 v[3]; };
INLINE struct K_uint3 K_Uint3FromU32(u32 x, u32 y, u32 z)
Inline struct K_uint3 K_Uint3FromU32(u32 x, u32 y, u32 z)
{
return (struct K_uint3) { .v[0] = x, .v[1] = y, .v[2] = z };
}
typedef struct K_uint4 K_uint4;
struct K_uint4 { u32 v[4]; };
INLINE struct K_uint4 K_Uint4FromU32(u32 x, u32 y, u32 z, u32 w)
Inline struct K_uint4 K_Uint4FromU32(u32 x, u32 y, u32 z, u32 w)
{
return (struct K_uint4) { .v[0] = x, .v[1] = y, .v[2] = z, .v[3] = w };
}
typedef struct K_float K_float;
struct K_float { f32 v; };
INLINE struct K_float K_FloatFromF32(f32 v)
Inline struct K_float K_FloatFromF32(f32 v)
{
return (struct K_float) { .v = v };
}
typedef struct K_float2 K_float2;
struct K_float2 { f32 v[2]; };
INLINE struct K_float2 K_Float2FromV2(V2 v)
Inline struct K_float2 K_Float2FromV2(V2 v)
{
return (struct K_float2) { .v[0] = v.x, .v[1] = v.y };
}
typedef struct K_float3 K_float3;
struct K_float3 { f32 v[3]; };
INLINE struct K_float3 K_Float3FromV3(V3 v)
Inline struct K_float3 K_Float3FromV3(V3 v)
{
return (struct K_float3) { .v[0] = v.x, .v[1] = v.y, .v[2] = v.z };
}
typedef struct K_float4x4 K_float4x4;
struct K_float4x4 { f32 v[4][4]; };
INLINE struct K_float4x4 K_Float4x4FromMat4x4(Mat4x4 v)
Inline struct K_float4x4 K_Float4x4FromMat4x4(Mat4x4 v)
{
struct K_float4x4 res;
STATIC_ASSERT(sizeof(res) == sizeof(v));
StaticAssert(sizeof(res) == sizeof(v));
MEMCPY(&res, v.e, sizeof(res));
return res;
}
struct K_float2x3 { f32 v[2][3]; };
INLINE struct K_float2x3 K_Float2x3FromXform(Xform v)
Inline struct K_float2x3 K_Float2x3FromXform(Xform v)
{
struct K_float2x3 res;
STATIC_ASSERT(sizeof(res) == sizeof(v));
StaticAssert(sizeof(res) == sizeof(v));
MEMCPY(&res, &v, sizeof(res));
return res;
}

View File

@ -37,14 +37,14 @@ struct track {
SND_Sound *sound;
M_TrackDesc desc;
/* Internal */
/* internal */
struct mix mix;
struct track *next;
struct track *prev;
};
GLOBAL struct {
Global struct {
P_Mutex mutex;
/* Listener */
@ -57,7 +57,7 @@ GLOBAL struct {
struct track *track_last_playing;
u64 track_playing_count;
struct track *track_first_free;
} G = ZI, DEBUG_ALIAS(G, G_mixer);
} G = ZI, DebugAlias(G, G_mixer);
/* ========================== *
* Startup
@ -66,7 +66,7 @@ GLOBAL struct {
M_StartupReceipt mixer_startup(void)
{
__prof;
G.track_arena = AllocArena(GIBI(64));
G.track_arena = AllocArena(Gibi(64));
G.listener_pos = V2FromXY(0, 0);
G.listener_dir = V2FromXY(0, -1);
return (M_StartupReceipt) { 0 };
@ -76,7 +76,7 @@ M_StartupReceipt mixer_startup(void)
* Track
* ========================== */
INTERNAL M_Handle track_to_handle(struct track *track)
internal M_Handle track_to_handle(struct track *track)
{
return (M_Handle) {
.gen = track->gen,
@ -84,7 +84,7 @@ INTERNAL M_Handle track_to_handle(struct track *track)
};
}
INTERNAL struct track *track_from_handle(M_Handle handle)
internal struct track *track_from_handle(M_Handle handle)
{
struct track *track = (struct track *)handle.data;
if (track && track->gen == handle.gen) {
@ -94,7 +94,7 @@ INTERNAL struct track *track_from_handle(M_Handle handle)
}
}
INTERNAL struct track *track_alloc_locked(P_Lock *lock, SND_Sound *sound)
internal struct track *track_alloc_locked(P_Lock *lock, SND_Sound *sound)
{
P_AssertLockedE(lock, &G.mutex);
(UNUSED)lock;
@ -133,7 +133,7 @@ INTERNAL struct track *track_alloc_locked(P_Lock *lock, SND_Sound *sound)
return track;
}
INTERNAL void track_release_locked(P_Lock *lock, struct track *track)
internal void track_release_locked(P_Lock *lock, struct track *track)
{
P_AssertLockedE(lock, &G.mutex);
(UNUSED)lock;
@ -245,12 +245,12 @@ void mixer_set_listener(V2 pos, V2 dir)
* Update
* ========================== */
INTERNAL i16 sample_sound(SND_Sound *sound, u64 sample_pos, b32 wrap)
internal i16 sample_sound(SND_Sound *sound, u64 sample_pos, b32 wrap)
{
if (wrap) {
return sound->pcm.samples[sample_pos % sound->pcm.count];
} else if (sample_pos < sound->pcm.count) {
return sound->pcm.samples[sample_pos];
return sound->samples[sample_pos % sound->samples_count];
} else if (sample_pos < sound->samples_count) {
return sound->samples[sample_pos];
} else {
return 0;
}
@ -296,7 +296,7 @@ M_PcmF32 mixer_update(Arena *arena, u64 frame_count)
__profn("Mix track");
struct mix *mix = mixes[mix_index];
if (mix->source->pcm.count <= 0) {
if (mix->source->samples_count <= 0) {
/* Skip empty sounds */
continue;
}
@ -322,11 +322,11 @@ M_PcmF32 mixer_update(Arena *arena, u64 frame_count)
u64 source_sample_pos_start = mix->source_pos;
u64 source_sample_pos_end = source_sample_pos_start + source_samples_count;
if (source_sample_pos_end >= source->pcm.count) {
if (source_sample_pos_end >= source->samples_count) {
if (desc.looping) {
source_sample_pos_end = source_sample_pos_end % source->pcm.count;
source_sample_pos_end = source_sample_pos_end % source->samples_count;
} else {
source_sample_pos_end = source->pcm.count;
source_sample_pos_end = source->samples_count;
mix->track_finished = 1;
}
}

View File

@ -1,6 +1,6 @@
#include "mp3.h"
#if PLATFORM_WINDOWS
#if PlatformIsWindows
# include "mp3_core_mmf.c"
#else
# error Mp3 core not implemented for this platform

View File

@ -2,7 +2,8 @@
#define MP3_DECODE_FLAG_STEREO 0x01
Struct(MP3_Result) {
PcmData pcm;
u64 samples_count;
i16 *samples;
b32 success;
};

View File

@ -84,7 +84,7 @@ MP3_Result mp3_decode(Arena *arena, String encoded, u32 sample_rate, u32 flags)
* Read
* ========================== */
res.pcm.samples = PushDry(arena, i16);
res.samples_count = PushDry(arena, i16);
u64 sample_bytes_read = 0;
for (;;) {
IMFSample *sample;
@ -99,7 +99,7 @@ MP3_Result mp3_decode(Arena *arena, String encoded, u32 sample_rate, u32 flags)
res.success = 1;
break;
}
ASSERT(sample_flags == 0);
Assert(sample_flags == 0);
/* Read samples */
IMFMediaBuffer *buffer;
@ -119,7 +119,7 @@ MP3_Result mp3_decode(Arena *arena, String encoded, u32 sample_rate, u32 flags)
IMFSample_Release(sample);
}
res.pcm.count = sample_bytes_read / bytes_per_sample;
res.samples_count = sample_bytes_read / bytes_per_sample;
/* ========================== *
* Cleanup

View File

@ -115,13 +115,13 @@ struct host_msg_assembler_lookup_bin {
struct host_msg_assembler *last;
};
READONLY GLOBAL struct host_channel _g_host_channel_nil = { .valid = 0 };
Readonly Global struct host_channel _g_host_channel_nil = { .valid = 0 };
GLOBAL struct {
Global struct {
i32 _;
} G = ZI, DEBUG_ALIAS(G, G_host);
} G = ZI, DebugAlias(G, G_host);
INTERNAL void host_msg_assembler_release(struct host_msg_assembler *ma);
internal void host_msg_assembler_release(struct host_msg_assembler *ma);
/* ========================== *
* Startup
@ -139,17 +139,17 @@ N_StartupReceipt host_startup(void)
N_Host *host_alloc(u16 listen_port)
{
Arena *arena = AllocArena(GIBI(64));
Arena *arena = AllocArena(Gibi(64));
N_Host *host = PushStruct(arena, N_Host);
host->arena = arena;
host->cmd_arena = AllocArena(GIBI(64));
host->channel_arena = AllocArena(GIBI(64));
host->cmd_arena = AllocArena(Gibi(64));
host->channel_arena = AllocArena(Gibi(64));
host->rcv_buffer_read = PushStruct(host->arena, N_RcvBuffer);
host->rcv_buffer_write = PushStruct(host->arena, N_RcvBuffer);
host->rcv_buffer_read->arena = AllocArena(GIBI(64));
host->rcv_buffer_write->arena = AllocArena(GIBI(64));
host->buddy = AllocBuddyCtx(GIBI(64));
host->rcv_buffer_read->arena = AllocArena(Gibi(64));
host->rcv_buffer_write->arena = AllocArena(Gibi(64));
host->buddy = AllocBuddyCtx(Gibi(64));
host->channels = PushDry(host->channel_arena, struct host_channel);
@ -159,7 +159,7 @@ N_Host *host_alloc(u16 listen_port)
host->num_msg_assembler_lookup_bins = N_NumMsgAssemblerLookupBins;
host->msg_assembler_lookup_bins = PushArray(host->arena, struct host_msg_assembler_lookup_bin, host->num_msg_assembler_lookup_bins);
host->sock = P_AllocSock(listen_port, MEBI(2), MEBI(2));
host->sock = P_AllocSock(listen_port, Mebi(2), Mebi(2));
return host;
}
@ -180,12 +180,12 @@ void host_release(N_Host *host)
* Channel
* ========================== */
INTERNAL u64 hash_from_address(P_Address address)
internal u64 hash_from_address(P_Address address)
{
return hash_fnv64(HASH_FNV64_BASIS, STRING_FROM_STRUCT(&address));
}
INTERNAL struct host_channel *host_channel_from_address(N_Host *host, P_Address address)
internal struct host_channel *host_channel_from_address(N_Host *host, P_Address address)
{
u64 hash = hash_from_address(address);
N_ChannelLookupBin *bin = &host->channel_lookup_bins[hash % host->num_channel_lookup_bins];
@ -198,7 +198,7 @@ INTERNAL struct host_channel *host_channel_from_address(N_Host *host, P_Address
}
/* Returns nil channel if id = HOST_CHANNEL_ID_ALL */
INTERNAL struct host_channel *host_single_channel_from_id(N_Host *host, N_ChannelId channel_id)
internal struct host_channel *host_single_channel_from_id(N_Host *host, N_ChannelId channel_id)
{
if (channel_id.gen > 0 && channel_id.idx < host->num_channels_reserved) {
struct host_channel *channel = &host->channels[channel_id.idx];
@ -209,7 +209,7 @@ INTERNAL struct host_channel *host_single_channel_from_id(N_Host *host, N_Channe
return &_g_host_channel_nil;
}
INTERNAL struct host_channel_list host_channels_from_id(Arena *arena, N_Host *host, N_ChannelId channel_id)
internal struct host_channel_list host_channels_from_id(Arena *arena, N_Host *host, N_ChannelId channel_id)
{
struct host_channel_list res = ZI;
if (host_channel_id_eq(channel_id, HOST_CHANNEL_ID_ALL)) {
@ -238,7 +238,7 @@ INTERNAL struct host_channel_list host_channels_from_id(Arena *arena, N_Host *ho
return res;
}
INTERNAL struct host_channel *host_channel_alloc(N_Host *host, P_Address address)
internal struct host_channel *host_channel_alloc(N_Host *host, P_Address address)
{
N_ChannelId id = ZI;
struct host_channel *channel;
@ -274,7 +274,7 @@ INTERNAL struct host_channel *host_channel_alloc(N_Host *host, P_Address address
return channel;
}
INTERNAL void host_channel_release(struct host_channel *channel)
internal void host_channel_release(struct host_channel *channel)
{
N_Host *host = channel->host;
@ -322,7 +322,7 @@ INTERNAL void host_channel_release(struct host_channel *channel)
* Msg assembler
* ========================== */
INTERNAL u64 hash_from_channel_msg(N_ChannelId channel_id, u64 msg_id)
internal u64 hash_from_channel_msg(N_ChannelId channel_id, u64 msg_id)
{
u64 res = HASH_FNV64_BASIS;
res = hash_fnv64(res, STRING_FROM_STRUCT(&channel_id));
@ -330,7 +330,7 @@ INTERNAL u64 hash_from_channel_msg(N_ChannelId channel_id, u64 msg_id)
return res;
}
INTERNAL struct host_msg_assembler *host_get_msg_assembler(N_Host *host, N_ChannelId channel_id, u64 msg_id)
internal struct host_msg_assembler *host_get_msg_assembler(N_Host *host, N_ChannelId channel_id, u64 msg_id)
{
u64 hash = hash_from_channel_msg(channel_id, msg_id);
struct host_msg_assembler_lookup_bin *bin = &host->msg_assembler_lookup_bins[hash % host->num_msg_assembler_lookup_bins];
@ -342,7 +342,7 @@ INTERNAL struct host_msg_assembler *host_get_msg_assembler(N_Host *host, N_Chann
return 0;
}
INTERNAL struct host_msg_assembler *host_msg_assembler_alloc(struct host_channel *channel, u64 msg_id, u64 chunk_count, u64 now_ns, b32 is_reliable)
internal struct host_msg_assembler *host_msg_assembler_alloc(struct host_channel *channel, u64 msg_id, u64 chunk_count, u64 now_ns, b32 is_reliable)
{
N_Host *host = channel->host;
struct host_msg_assembler *ma;
@ -401,7 +401,7 @@ INTERNAL struct host_msg_assembler *host_msg_assembler_alloc(struct host_channel
return ma;
}
INTERNAL void host_msg_assembler_release(struct host_msg_assembler *ma)
internal void host_msg_assembler_release(struct host_msg_assembler *ma)
{
struct host_channel *channel = ma->channel;
N_Host *host = channel->host;
@ -444,7 +444,7 @@ INTERNAL void host_msg_assembler_release(struct host_msg_assembler *ma)
host->first_free_msg_assembler = ma;
}
INTERNAL void host_msg_assembler_touch(struct host_msg_assembler *ma, i64 now_ns)
internal void host_msg_assembler_touch(struct host_msg_assembler *ma, i64 now_ns)
{
struct host_channel *channel = ma->channel;
if (ma != channel->most_recent_msg_assembler) {
@ -478,7 +478,7 @@ INTERNAL void host_msg_assembler_touch(struct host_msg_assembler *ma, i64 now_ns
ma->touched_ns = now_ns;
}
INTERNAL b32 host_msg_assembler_is_chunk_filled(struct host_msg_assembler *ma, u64 chunk_id)
internal b32 host_msg_assembler_is_chunk_filled(struct host_msg_assembler *ma, u64 chunk_id)
{
if (chunk_id < ma->num_chunks_total) {
return (ma->chunk_bitmap[chunk_id / 8] & (1 << (chunk_id % 8))) != 0;
@ -486,7 +486,7 @@ INTERNAL b32 host_msg_assembler_is_chunk_filled(struct host_msg_assembler *ma, u
return 0;
}
INTERNAL void host_msg_assembler_set_chunk_received(struct host_msg_assembler *ma, u64 chunk_id)
internal void host_msg_assembler_set_chunk_received(struct host_msg_assembler *ma, u64 chunk_id)
{
if (chunk_id < ma->num_chunks_total) {
ma->chunk_bitmap[chunk_id / 8] |= (1 << (chunk_id % 8));
@ -497,7 +497,7 @@ INTERNAL void host_msg_assembler_set_chunk_received(struct host_msg_assembler *m
* Packet
* ========================== */
INTERNAL N_SndPacket *host_channel_snd_packet_alloc(struct host_channel *channel, b32 is_reliable)
internal N_SndPacket *host_channel_snd_packet_alloc(struct host_channel *channel, b32 is_reliable)
{
N_Host *host = channel->host;
N_SndPacket *packet = 0;
@ -534,7 +534,7 @@ INTERNAL N_SndPacket *host_channel_snd_packet_alloc(struct host_channel *channel
* Cmd interface
* ========================== */
INTERNAL N_Cmd *host_cmd_alloc_and_append(N_Host *host)
internal N_Cmd *host_cmd_alloc_and_append(N_Host *host)
{
N_Cmd *cmd = PushStruct(host->cmd_arena, N_Cmd);
if (host->last_cmd) {
@ -584,7 +584,7 @@ i64 host_get_channel_last_rtt_ns(N_Host *host, N_ChannelId channel_id)
* Update
* ========================== */
INTERNAL N_Event *push_event(Arena *arena, N_EventList *list)
internal N_Event *push_event(Arena *arena, N_EventList *list)
{
N_Event *event = PushStruct(arena, N_Event);
if (list->last) {
@ -769,12 +769,12 @@ N_EventList host_update_begin(Arena *arena, N_Host *host)
}
} else {
/* Overflow reading chunk */
ASSERT(0);
Assert(0);
}
}
} else {
/* Chunk id/count mismatch */
ASSERT(0);
Assert(0);
}
}
} break;
@ -833,7 +833,7 @@ N_EventList host_update_begin(Arena *arena, N_Host *host)
/* Release timed out unreliable msg buffers */
{
/* TODO: Configurable timeout */
i64 unreliable_msg_timeout_ns = NS_FROM_SECONDS(0.1);
i64 unreliable_msg_timeout_ns = NsFromSeconds(0.1);
struct host_msg_assembler *ma = channel->least_recent_msg_assembler;
while (ma) {
struct host_msg_assembler *next = ma->more_recent;

View File

@ -1,5 +1,5 @@
#define HOST_CHANNEL_ID_NIL (N_ChannelId) { .gen = 0, .idx = 0 }
#define HOST_CHANNEL_ID_ALL (N_ChannelId) { .gen = U32_MAX, .idx = U32_MAX }
#define HOST_CHANNEL_ID_ALL (N_ChannelId) { .gen = U32Max, .idx = U32Max }
#define N_PacketMagic 0xd9e3b8b6
#define N_MaxPacketChunkLen 1024
@ -148,8 +148,8 @@ void host_queue_write(N_Host *host, N_ChannelId channel_id, String msg, u32 flag
* ========================== */
i64 host_get_channel_last_rtt_ns(N_Host *host, N_ChannelId channel_id);
INLINE b32 host_channel_id_eq(N_ChannelId a, N_ChannelId b) { return a.idx == b.idx && a.gen == b.gen; }
INLINE b32 host_channel_id_is_nil(N_ChannelId id) { return id.gen == 0 && id.idx == 0; }
Inline b32 host_channel_id_eq(N_ChannelId a, N_ChannelId b) { return a.idx == b.idx && a.gen == b.gen; }
Inline b32 host_channel_id_is_nil(N_ChannelId id) { return id.gen == 0 && id.idx == 0; }
/* ========================== *
* Update

View File

@ -3,7 +3,7 @@
#include "platform_snc.c"
#include "platform_log.c"
#if PLATFORM_WINDOWS
#if PlatformIsWindows
# include "platform_win32.c"
#else
# error Platform core not implemented

View File

@ -7,7 +7,7 @@
#include "platform_core.h"
#include "platform_log.h"
#if PLATFORM_WINDOWS
#if PlatformIsWindows
#pragma warning(push, 0)
# define UNICODE
# define WIN32_LEAN_AND_MEAN

View File

@ -3,35 +3,35 @@
P_SharedLogCtx P_shared_log_ctx = ZI;
READONLY P_LogLevelSettings P_log_settings[P_LogLevel_Count] = {
Readonly P_LogLevelSettings P_log_settings[P_LogLevel_Count] = {
[P_LogLevel_Critical] = {
LIT_NOCAST("CRITICAL"),
COLOR_PURPLE
ColorPurple
},
[P_LogLevel_Error] = {
LIT_NOCAST("ERROR"),
COLOR_RED
ColorRed
},
[P_LogLevel_Warning] = {
LIT_NOCAST("WARNING"),
COLOR_YELLOW
ColorYellow
},
[P_LogLevel_Success] = {
LIT_NOCAST("SUCCESS"),
COLOR_GREEN
ColorGreen
},
[P_LogLevel_Info] = {
LIT_NOCAST("INFO"),
COLOR_WHITE
ColorWhite
},
[P_LogLevel_Debug] = {
LIT_NOCAST("DEBUG"),
COLOR_BLUE
ColorBlue
}
};
@ -42,7 +42,7 @@ void P_LogStartup(String logfile_path)
{
__prof;
P_SharedLogCtx *ctx = &P_shared_log_ctx;
ctx->callbacks_arena = AllocArena(MEBI(8));
ctx->callbacks_arena = AllocArena(Mebi(8));
if (logfile_path.len > 0)
{
/* Create / wipe log file */

View File

@ -34,7 +34,7 @@ Struct(LogEventCallback)
/* Log level configuration */
#ifndef P_LogLevel_CompTime
# if RTC || PROFILING
# if RtcIsEnabled || ProfilingIsEnabled
# define P_LogLevel_CompTime P_LogLevel_Debug
# else
# define P_LogLevel_CompTime P_LogLevel_Info
@ -43,7 +43,7 @@ Struct(LogEventCallback)
/* Source location configuration */
#ifndef P_IncludeLogSourceLocation
# define P_IncludeLogSourceLocation (DEBINFO)
# define P_IncludeLogSourceLocation (DebinfoEnabled)
#endif
#define P_LogLevel_None -1
@ -81,7 +81,7 @@ Struct(P_LogLevelSettings)
u32 color;
};
extern READONLY P_LogLevelSettings P_log_settings[P_LogLevel_Count];
extern Readonly P_LogLevelSettings P_log_settings[P_LogLevel_Count];
////////////////////////////////
//~ Startup

View File

@ -49,13 +49,13 @@ P_Lock P_LockSpinE(P_Mutex *m, i32 spin)
}
else
{
P_Wait(&m->v, &v, 4, I64_MAX);
P_Wait(&m->v, &v, 4, I64Max);
spin_cnt = 0;
}
}
}
#if RTC
#if RtcIsEnabled
atomic32_fetch_set(&m->exclusive_fiber_id, FiberId());
#endif
@ -96,7 +96,7 @@ P_Lock P_LockSpinS(P_Mutex *m, i32 spin)
}
else
{
P_Wait(&m->v, &v, 4, I64_MAX);
P_Wait(&m->v, &v, 4, I64Max);
spin_cnt = 0;
}
}
@ -122,7 +122,7 @@ void P_Unlock(P_Lock *l)
P_Mutex *m = l->mutex;
if (l->exclusive)
{
#if RTC
#if RtcIsEnabled
atomic32_fetch_set(&m->exclusive_fiber_id, 0);
#endif
atomic32_fetch_set(&m->v, 0);
@ -131,7 +131,7 @@ void P_Unlock(P_Lock *l)
{
atomic32_fetch_add(&m->v, -1);
}
P_Wake(&m->v, I32_MAX);
P_Wake(&m->v, I32Max);
MEMZERO_STRUCT(l);
}
@ -140,7 +140,7 @@ void P_Unlock(P_Lock *l)
void P_WaitOnCv(P_Cv *cv, P_Lock *l)
{
P_WaitOnCvTime(cv, l, I64_MAX);
P_WaitOnCvTime(cv, l, I64Max);
}
void P_WaitOnCvTime(P_Cv *cv, P_Lock *l, i64 timeout_ns)
@ -179,7 +179,7 @@ void P_CounterAdd(P_Counter *counter, i64 x)
i64 new_v = old_v + x;
if (old_v > 0 && new_v <= 0)
{
P_Wake(&counter->v, I32_MAX);
P_Wake(&counter->v, I32Max);
}
}
@ -188,7 +188,7 @@ void P_WaitOnCounter(P_Counter *counter)
i64 v = atomic64_fetch(&counter->v);
while (v > 0)
{
P_Wait(&counter->v, &v, sizeof(v), I64_MAX);
P_Wait(&counter->v, &v, sizeof(v), I64Max);
v = atomic64_fetch(&counter->v);
}
}

View File

@ -11,15 +11,15 @@ AlignedStruct(P_Mutex, 64)
*/
Atomic32 v;
#if RTC
#if RtcIsEnabled
Atomic32 exclusive_fiber_id;
u8 _pad[56];
#else
u8 _pad[60];
#endif
};
STATIC_ASSERT(sizeof(P_Mutex) == 64); /* Padding validation */
STATIC_ASSERT(alignof(P_Mutex) == 64); /* Prevent false sharing */
StaticAssert(sizeof(P_Mutex) == 64); /* Padding validation */
StaticAssert(alignof(P_Mutex) == 64); /* Prevent false sharing */
Struct(P_Lock)
{
@ -35,8 +35,8 @@ AlignedStruct(P_Cv, 64)
Atomic64 wake_gen;
u8 _pad[56];
};
STATIC_ASSERT(sizeof(P_Cv) == 64); /* Padding validation */
STATIC_ASSERT(alignof(P_Cv) == 64); /* Prevent false sharing */
StaticAssert(sizeof(P_Cv) == 64); /* Padding validation */
StaticAssert(alignof(P_Cv) == 64); /* Prevent false sharing */
////////////////////////////////
//~ Counter types
@ -46,8 +46,8 @@ AlignedStruct(P_Counter, 64)
Atomic64 v;
u8 _pad[56];
};
STATIC_ASSERT(sizeof(P_Counter) == 64); /* Padding validation */
STATIC_ASSERT(alignof(P_Counter) == 64); /* Prevent false sharing */
StaticAssert(sizeof(P_Counter) == 64); /* Padding validation */
StaticAssert(alignof(P_Counter) == 64); /* Prevent false sharing */
////////////////////////////////
//~ Mutex operations
@ -61,9 +61,9 @@ P_Lock P_LockS(P_Mutex *m);
void P_Unlock(P_Lock *lock);
//- Lock assertion
#if RTC
# define P_AssertLockedE(l, m) ASSERT((l)->mutex == (m) && (l)->exclusive == 1)
# define P_AssertLockedES(l, m) ASSERT((l)->mutex == (m))
#if RtcIsEnabled
# define P_AssertLockedE(l, m) Assert((l)->mutex == (m) && (l)->exclusive == 1)
# define P_AssertLockedES(l, m) Assert((l)->mutex == (m))
#else
# define P_AssertLockedE(l, m) (UNUSED)l
# define P_AssertLockedES(l, m) (UNUSED)l

View File

@ -66,7 +66,7 @@ P_W32_Thread *P_W32_AllocThread(P_W32_ThreadFunc *entry_point, void *thread_data
__prof;
TempArena scratch = BeginScratchNoConflict();
P_W32_SharedCtx *g = &P_W32_shared_ctx;
ASSERT(entry_point != 0);
Assert(entry_point != 0);
P_LogInfoF("Creating thread \"%F\"", FMT_STR(thread_name));
@ -186,7 +186,7 @@ void P_W32_WaitReleaseThread(P_W32_Thread *thread)
{
__prof;
b32 success = P_W32_TryReleaseThread(thread, F32_INFINITY);
ASSERT(success);
Assert(success);
(UNUSED)success;
}
@ -598,7 +598,7 @@ P_W32_Fiber *P_W32_AllocFiber(P_W32_JobPool *pool)
/* Concat fiber name */
i32 name_size = 1;
ASSERT(sizeof(sizeof(P_W32_FiberNamePrefixCstr)) <= P_W32_FiberNameMaxSize);
Assert(sizeof(sizeof(P_W32_FiberNamePrefixCstr)) <= P_W32_FiberNameMaxSize);
MEMCPY(new_name_cstr, P_W32_FiberNamePrefixCstr, sizeof(P_W32_FiberNamePrefixCstr));
name_size += sizeof(P_W32_FiberNamePrefixCstr) - 2;
MEMCPY(new_name_cstr + name_size, id_chars, id_chars_len);
@ -649,7 +649,7 @@ void P_W32_ReleaseFiber(P_W32_JobPool *pool, P_W32_Fiber *fiber)
P_W32_UnlockTicketMutex(&pool->free_fibers_lock);
}
FORCE_INLINE P_W32_Fiber *P_W32_FiberFromId(i16 id)
ForceInline P_W32_Fiber *P_W32_FiberFromId(i16 id)
{
P_W32_SharedCtx *g = &P_W32_shared_ctx;
if (id <= 0)
@ -662,7 +662,7 @@ FORCE_INLINE P_W32_Fiber *P_W32_FiberFromId(i16 id)
}
}
FORCE_NO_INLINE void P_W32_FiberResume(P_W32_Fiber *fiber)
ForceNoInline void P_W32_FiberResume(P_W32_Fiber *fiber)
{
MemoryBarrier();
SwitchToFiber(fiber->addr);
@ -672,13 +672,13 @@ FORCE_NO_INLINE void P_W32_FiberResume(P_W32_Fiber *fiber)
void P_W32_YieldFiber(P_W32_Fiber *fiber, P_W32_Fiber *parent_fiber)
{
(UNUSED)fiber;
ASSERT(fiber->id == FiberId());
ASSERT(parent_fiber->id == fiber->parent_id);
ASSERT(parent_fiber->id > 0);
Assert(fiber->id == FiberId());
Assert(parent_fiber->id == fiber->parent_id);
Assert(parent_fiber->id > 0);
{
__prof_fiber_leave();
P_W32_FiberResume(parent_fiber);
__prof_fiber_enter(fiber->name_cstr, PROF_THREAD_GROUP_FIBERS - MEBI(fiber->job_pool) + KIBI(1) + fiber->id);
__prof_fiber_enter(fiber->name_cstr, PROF_THREAD_GROUP_FIBERS - Mebi(fiber->job_pool) + Kibi(1) + fiber->id);
}
}
@ -686,7 +686,7 @@ void P_W32_FiberEntryPoint(void *id_ptr)
{
i16 id = (i32)(i64)id_ptr;
volatile P_W32_Fiber *fiber = P_W32_FiberFromId(id);
__prof_fiber_enter(fiber->name_cstr, PROF_THREAD_GROUP_FIBERS - MEBI(fiber->job_pool) + KIBI(1) + fiber->id);
__prof_fiber_enter(fiber->name_cstr, PROF_THREAD_GROUP_FIBERS - Mebi(fiber->job_pool) + Kibi(1) + fiber->id);
for (;;)
{
/* Run job */
@ -738,7 +738,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
{
__profn("Set priority");
b32 success = SetThreadPriority(thread_handle, pool->thread_priority) != 0;
ASSERT(success);
Assert(success);
(UNUSED)success;
}
@ -747,7 +747,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
{
__profn("Set affinity");
b32 success = SetThreadAffinityMask(thread_handle, pool->thread_affinity_mask) != 0;
#if RTC || PROFILING
#if RtcIsEnabled || ProfilingIsEnabled
{
/* Retry until external tools can set correct process affinity */
i32 delay_ms = 16;
@ -760,7 +760,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
}
}
#endif
ASSERT(success);
Assert(success);
(UNUSED)success;
}
#endif
@ -771,7 +771,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
__profn("Set mm thread characteristics");
DWORD task = 0;
HANDLE mmc_handle = AvSetMmThreadCharacteristics(L"Pro Audio", &task);
ASSERT(mmc_handle);
Assert(mmc_handle);
(UNUSED)mmc_handle;
}
}
@ -790,7 +790,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
void *job_sig = 0;
P_Counter *job_counter = 0;
{
//__profnc("Pull job", RGB32_F(0.75, 0.75, 0));
//__profnc("Pull job", Rgb32F(0.75, 0.75, 0));
for (P_Priority priority = 0; priority < (i32)countof(pool->job_queues) && !job_func; ++priority)
{
P_W32_JobQueue *queue = &pool->job_queues[priority];
@ -870,7 +870,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
}
job_fiber_id = job_fiber->id;
{
__profnc("Run fiber", RGB32_F(1, 1, 1));
__profnc("Run fiber", Rgb32F(1, 1, 1));
__profvalue(job_fiber->id);
P_W32_YieldParam yield = ZI;
job_fiber->parent_id = worker_fiber_id;
@ -903,7 +903,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
u32 wait_size = yield.wait.size;
i64 wait_timeout_ns = yield.wait.timeout_ns;
i64 wait_time = 0;
if (wait_timeout_ns > 0 && wait_timeout_ns < I64_MAX)
if (wait_timeout_ns > 0 && wait_timeout_ns < I64Max)
{
u64 current_scheduler_cycle = atomic64_fetch(&g->current_scheduler_cycle.v);
i64 current_scheduler_cycle_period_ns = atomic64_fetch(&g->current_scheduler_cycle_period_ns.v);
@ -928,7 +928,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
case 2: cancel_wait = (u16)_InterlockedCompareExchange16(wait_addr, 0, 0) != *(u16 *)wait_cmp; break;
case 4: cancel_wait = (u32)_InterlockedCompareExchange(wait_addr, 0, 0) != *(u32 *)wait_cmp; break;
case 8: cancel_wait = (u64)_InterlockedCompareExchange64(wait_addr, 0, 0) != *(u64 *)wait_cmp; break;
default: cancel_wait = 1; ASSERT(0); break; /* Invalid wait size */
default: cancel_wait = 1; Assert(0); break; /* Invalid wait size */
}
}
if (wait_time != 0 && !cancel_wait)
@ -1070,7 +1070,7 @@ P_W32_ThreadDef(P_W32_JobWorkerEntryFunc, worker_ctx_arg)
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
if (num_jobs_in_queue <= 0 && !shutdown)
{
//__profnc("Wait for job", RGB32_F(0.75, 0.75, 0));
//__profnc("Wait for job", Rgb32F(0.75, 0.75, 0));
P_W32_LockTicketMutex(&pool->workers_wake_lock);
{
num_jobs_in_queue = atomic64_fetch(&pool->num_jobs_in_queue.v);
@ -1109,7 +1109,7 @@ P_W32_ThreadDef(P_W32_JobSchedulerEntryFunc, _)
i32 priority = THREAD_PRIORITY_TIME_CRITICAL;
b32 success = SetThreadPriority(GetCurrentThread(), priority);
(UNUSED)success;
ASSERT(success);
Assert(success);
}
/* Create high resolution timer */
@ -1244,8 +1244,8 @@ P_W32_Window *P_W32_AllocWindow(void)
}
MEMZERO_STRUCT(window);
window->event_arenas[0] = AllocArena(GIBI(64));
window->event_arenas[1] = AllocArena(GIBI(64));
window->event_arenas[0] = AllocArena(Gibi(64));
window->event_arenas[1] = AllocArena(Gibi(64));
/* Start window event thread */
/* NOTE: This thread must finish building for the window to actually be
@ -1676,7 +1676,7 @@ LRESULT CALLBACK P_W32_Win32WindowProc(HWND hwnd, UINT msg, WPARAM wparam, LPARA
{
u16 utf16_pair_bytes[2] = { high, low };
Utf16DecodeResult decoded = uni_decode_utf16((String16) { .len = countof(utf16_pair_bytes), .text = utf16_pair_bytes });
if (decoded.advance16 == 2 && decoded.codepoint < U32_MAX)
if (decoded.advance16 == 2 && decoded.codepoint < U32Max)
{
codepoint = decoded.codepoint;
}
@ -2504,7 +2504,7 @@ P_WatchInfoList P_ReadWatchWait(Arena *arena, P_Watch *dw)
{
OVERLAPPED ov = ZI;
ov.hEvent = CreateEventW(0, 0, 0, 0);
ASSERT(ov.hEvent);
Assert(ov.hEvent);
BOOL success = ReadDirectoryChangesW(w32_watch->dir_handle,
w32_watch->results_buff,
@ -2515,7 +2515,7 @@ P_WatchInfoList P_ReadWatchWait(Arena *arena, P_Watch *dw)
&ov,
0);
(UNUSED)success;
ASSERT(success);
Assert(success);
HANDLE handles[] = {
ov.hEvent,
@ -2606,7 +2606,7 @@ P_WatchInfoList P_ReadWatchWait(Arena *arena, P_Watch *dw)
}
else
{
ASSERT(0);
Assert(0);
}
}
@ -2787,7 +2787,7 @@ P_Address P_AddressFromIpPortCstr(char *ip_cstr, char *port_cstr)
res.valid = 1;
res.family = P_AddressFamily_Ipv4;
res.portnb = sockaddr->sin_port;
STATIC_ASSERT(sizeof(sockaddr->sin_addr) == 4);
StaticAssert(sizeof(sockaddr->sin_addr) == 4);
MEMCPY(res.ipnb, (void *)&sockaddr->sin_addr, 4);
break;
}
@ -2799,7 +2799,7 @@ P_Address P_AddressFromIpPortCstr(char *ip_cstr, char *port_cstr)
res.valid = 1;
res.family = P_AddressFamily_Ipv6;
res.portnb = sockaddr->sin6_port;
STATIC_ASSERT(sizeof(sockaddr->sin6_addr) == 16);
StaticAssert(sizeof(sockaddr->sin6_addr) == 16);
MEMCPY(res.ipnb, (void *)&sockaddr->sin6_addr, 16);
break;
#endif
@ -3020,7 +3020,7 @@ P_SockReadResult P_ReadSock(Arena *arena, P_Sock *sock)
{
P_W32_Sock *ws = (P_W32_Sock *)sock;
u64 read_buff_size = KIBI(64);
u64 read_buff_size = Kibi(64);
String read_buff = ZI;
read_buff.len = read_buff_size;
read_buff.text = PushArrayNoZero(arena, u8, read_buff_size);
@ -3046,11 +3046,11 @@ P_SockReadResult P_ReadSock(Arena *arena, P_Sock *sock)
}
else
{
#if RTC
#if RtcIsEnabled
i32 err = WSAGetLastError();
if (err != WSAEWOULDBLOCK && err != WSAETIMEDOUT && err != WSAECONNRESET)
{
ASSERT(0);
Assert(0);
}
#endif
}
@ -3067,12 +3067,12 @@ void P_WriteSock(P_Sock *sock, P_Address address, String data)
{
gstat_add(GSTAT_SOCK_BYTES_SENT, size);
}
#if RTC
#if RtcIsEnabled
if (size != (i32)data.len)
{
i32 err = WSAGetLastError();
(UNUSED)err;
ASSERT(0);
Assert(0);
}
#endif
}
@ -3281,9 +3281,9 @@ void P_Panic(String msg)
wstr[wstr_len] = 0;
#if RTC
#if RtcIsEnabled
MessageBoxExW(0, wstr, L"Fatal error", MB_ICONSTOP | MB_SETFOREGROUND | MB_TOPMOST, 0);
ASSERT(0);
Assert(0);
#endif
SetEvent(g->panic_event);
@ -3384,20 +3384,20 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
__profthread("Main thread", PROF_THREAD_GROUP_MAIN);
P_W32_SharedCtx *g = &P_W32_shared_ctx;
#if PROFILING
#if ProfilingIsEnabled
/* Start profiler */
{
__profn("Launch profiler");
STARTUPINFO si = ZI;
si.cb = sizeof(si);
PROCESS_INFORMATION pi = ZI;
wchar_t cmd[sizeof(PROFILING_CMD_WSTR)] = ZI;
MEMCPY(cmd, PROFILING_CMD_WSTR, sizeof(PROFILING_CMD_WSTR));
DeleteFileW(PROFILING_FILE_WSTR);
wchar_t cmd[sizeof(ProfilingIsEnabled_CMD_WSTR)] = ZI;
MEMCPY(cmd, ProfilingIsEnabled_CMD_WSTR, sizeof(ProfilingIsEnabled_CMD_WSTR));
DeleteFileW(ProfilingIsEnabled_FILE_WSTR);
b32 success = CreateProcessW(0, cmd, 0, 0, 0, DETACHED_PROCESS, 0, 0, &si, &pi);
if (!success)
{
MessageBoxExW(0, L"Failed to launch profiler using command '" PROFILING_CMD_WSTR L"'.", L"Error", MB_ICONSTOP | MB_SETFOREGROUND | MB_TOPMOST, 0);
MessageBoxExW(0, L"Failed to launch profiler using command '" ProfilingIsEnabled_CMD_WSTR L"'.", L"Error", MB_ICONSTOP | MB_SETFOREGROUND | MB_TOPMOST, 0);
}
}
/* Set internal profiler thread affinities */
@ -3443,7 +3443,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
delay_ms *= 2;
}
}
ASSERT(success);
Assert(success);
(UNUSED)success;
}
}
@ -3478,10 +3478,10 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Init fibers */
g->num_fibers = 1; /* Fiber at index 0 always nil */
g->fiber_names_arena = AllocArena(GIBI(64));
g->fiber_names_arena = AllocArena(Gibi(64));
/* Init wait lists */
g->wait_lists_arena = AllocArena(GIBI(64));
g->wait_lists_arena = AllocArena(Gibi(64));
/* Convert main thread to fiber */
P_W32_AllocFiber(0);
@ -3495,7 +3495,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
for (P_Priority priority = 0; priority < (i32)countof(pool->job_queues); ++priority)
{
P_W32_JobQueue *queue = &pool->job_queues[priority];
queue->arena = AllocArena(GIBI(64));
queue->arena = AllocArena(Gibi(64));
}
}
@ -3545,22 +3545,22 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
};
b32 success = RegisterRawInputDevices(&rid, 1, sizeof(rid));
ASSERT(success);
Assert(success);
(UNUSED)success;
}
/* Init threads pool */
g->threads_arena = AllocArena(GIBI(64));
g->threads_arena = AllocArena(Gibi(64));
/* Init watches pool */
g->watches_arena = AllocArena(GIBI(64));
g->watches_arena = AllocArena(Gibi(64));
/* Init windows pool */
g->windows_arena = AllocArena(GIBI(64));
g->windows_arena = AllocArena(Gibi(64));
/* Init winsock */
WSAStartup(MAKEWORD(2, 2), &g->wsa_data);
g->socks_arena = AllocArena(GIBI(64));
g->socks_arena = AllocArena(Gibi(64));
/* Start job scheduler */
atomic64_fetch_set(&g->current_scheduler_cycle_period_ns.v, P_W32_DefaultSchedulerPeriodNs);
@ -3574,10 +3574,10 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
{
P_W32_JobPool *pool = &g->job_pools[pool_kind];
String name_fmt = ZI;
i32 prof_group = PROF_THREAD_GROUP_FIBERS - MEBI(pool_kind);
i32 prof_group = PROF_THREAD_GROUP_FIBERS - Mebi(pool_kind);
switch (pool_kind)
{
default: ASSERT(0); break;
default: Assert(0); break;
case P_Pool_Sim:
{
@ -3618,7 +3618,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
pool->thread_affinity_mask = 0x0000000000000FFFull;
} break;
}
pool->worker_threads_arena = AllocArena(GIBI(64));
pool->worker_threads_arena = AllocArena(Gibi(64));
pool->worker_threads = PushArray(pool->worker_threads_arena, P_W32_Thread *, pool->num_worker_threads);
pool->worker_contexts = PushArray(pool->worker_threads_arena, P_W32_WorkerCtx, pool->num_worker_threads);
for (i32 i = 0; i < pool->num_worker_threads; ++i)
@ -3751,7 +3751,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
//- CRT stub
#if !CRTLIB
#if !CrtlibIsEnabled
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-variable-declarations"
@ -3770,4 +3770,4 @@ void __stdcall wWinMainCRTStartup(void)
#pragma clang diagnostic pop
#endif /* !CRTLIB */
#endif /* !CrtlibIsEnabled */

View File

@ -10,7 +10,7 @@ Struct(P_W32_TicketMutex)
////////////////////////////////
//~ Thread types
#define P_W32_ThreadStackSize KIBI(64)
#define P_W32_ThreadStackSize Kibi(64)
#define P_W32_ThreadDef(name, arg_name) void name(void *arg_name)
typedef P_W32_ThreadDef(P_W32_ThreadFunc, data);
@ -40,7 +40,7 @@ AlignedStruct(P_W32_WaitList, 64)
P_W32_WaitList *next_in_bin;
P_W32_WaitList *prev_in_bin;
};
STATIC_ASSERT(alignof(P_W32_WaitList) == 64); /* Avoid false sharing */
StaticAssert(alignof(P_W32_WaitList) == 64); /* Avoid false sharing */
AlignedStruct(P_W32_WaitBin, 64)
{
@ -49,12 +49,12 @@ AlignedStruct(P_W32_WaitBin, 64)
P_W32_WaitList *first_free_wait_list;
P_W32_TicketMutex lock;
};
STATIC_ASSERT(alignof(P_W32_WaitBin) == 64); /* Avoid false sharing */
StaticAssert(alignof(P_W32_WaitBin) == 64); /* Avoid false sharing */
////////////////////////////////
//~ Fiber types
#define P_W32_FiberStackSize MEBI(4)
#define P_W32_FiberStackSize Mebi(4)
#define P_W32_FiberNamePrefixCstr "Fiber ["
#define P_W32_FiberNameSuffixCstr "]"
#define P_W32_FiberNameMaxSize 64
@ -126,9 +126,9 @@ AlignedStruct(P_W32_Fiber, 64)
u8 _pad3[24]; /* 24 bytes (padding) */
};
STATIC_ASSERT(sizeof(P_W32_Fiber) == 128); /* Padding validation (increase if necessary) */
STATIC_ASSERT(alignof(P_W32_Fiber) == 64); /* Avoid false sharing */
STATIC_ASSERT(offsetof(P_W32_Fiber, wake_lock) % 4 == 0); /* Atomic must be aligned */
StaticAssert(sizeof(P_W32_Fiber) == 128); /* Padding validation (increase if necessary) */
StaticAssert(alignof(P_W32_Fiber) == 64); /* Avoid false sharing */
StaticAssert(offsetof(P_W32_Fiber, wake_lock) % 4 == 0); /* Atomic must be aligned */
////////////////////////////////
//~ Job queue types
@ -248,7 +248,7 @@ Struct(P_W32_Watch)
HANDLE dir_handle;
HANDLE wake_handle;
P_W32_Watch *next_free;
u8 results_buff[KIBI(64)];
u8 results_buff[Kibi(64)];
};
////////////////////////////////
@ -393,8 +393,8 @@ void P_W32_WakeByTime(u64 time);
P_W32_Fiber *P_W32_AllocFiber(P_W32_JobPool *pool);
void P_W32_ReleaseFiber(P_W32_JobPool *pool, P_W32_Fiber *fiber);
FORCE_INLINE P_W32_Fiber *P_W32_FiberFromId(i16 id);
FORCE_NO_INLINE void P_W32_FiberResume(P_W32_Fiber *fiber);
ForceInline P_W32_Fiber *P_W32_FiberFromId(i16 id);
ForceNoInline void P_W32_FiberResume(P_W32_Fiber *fiber);
void P_W32_YieldFiber(P_W32_Fiber *fiber, P_W32_Fiber *parent_fiber);
void P_W32_FiberEntryPoint(void *id_ptr);

View File

@ -1,6 +1,6 @@
#include "playback.h"
#if PLATFORM_WINDOWS
#if PlatformIsWindows
# include "playback_core_win32.c"
#else
# error Playback core not implemented for this platform

View File

@ -28,7 +28,7 @@ struct wasapi_buffer {
u8 *frames;
};
GLOBAL struct {
Global struct {
Atomic32 shutdown;
IAudioClient *client;
HANDLE event;
@ -36,15 +36,15 @@ GLOBAL struct {
WAVEFORMATEX *buffer_format;
u32 buffer_frames;
P_Counter playback_job_counter;
} G = ZI, DEBUG_ALIAS(G, G_playback_wasapi);
} G = ZI, DebugAlias(G, G_playback_wasapi);
/* ========================== *
* Startup
* ========================== */
INTERNAL void wasapi_initialize(void);
INTERNAL P_ExitFuncDef(playback_shutdown);
INTERNAL P_JobDef(playback_job, _);
internal void wasapi_initialize(void);
internal P_ExitFuncDef(playback_shutdown);
internal P_JobDef(playback_job, _);
PB_StartupReceipt playback_startup(M_StartupReceipt *mixer_sr)
{
@ -58,7 +58,7 @@ PB_StartupReceipt playback_startup(M_StartupReceipt *mixer_sr)
return (PB_StartupReceipt) { 0 };
}
INTERNAL P_ExitFuncDef(playback_shutdown)
internal P_ExitFuncDef(playback_shutdown)
{
__prof;
atomic32_fetch_set(&G.shutdown, 1);
@ -69,7 +69,7 @@ INTERNAL P_ExitFuncDef(playback_shutdown)
* Wasapi initialization
* ========================== */
INTERNAL void wasapi_initialize(void)
internal void wasapi_initialize(void)
{
u64 sample_rate = PLAYBACK_SAMPLE_RATE;
u64 channel_count = 2;
@ -162,7 +162,7 @@ INTERNAL void wasapi_initialize(void)
* Playback thread update
* ========================== */
INTERNAL struct wasapi_buffer wasapi_update_begin(void)
internal struct wasapi_buffer wasapi_update_begin(void)
{
__prof;
struct wasapi_buffer wspbuf = ZI;
@ -181,7 +181,7 @@ INTERNAL struct wasapi_buffer wasapi_update_begin(void)
return wspbuf;
}
INTERNAL void wasapi_update_end(struct wasapi_buffer *wspbuf, M_PcmF32 src)
internal void wasapi_update_end(struct wasapi_buffer *wspbuf, M_PcmF32 src)
{
__prof;
u32 frames_in_source = src.count / 2;
@ -199,7 +199,7 @@ INTERNAL void wasapi_update_end(struct wasapi_buffer *wspbuf, M_PcmF32 src)
/* This shouldn't occur, mixer should be generating samples equivilent
* to value returned from `wasapi_update_begin`. */
ASSERT(0);
Assert(0);
}
#if !AUDIO_ENABLED
@ -215,7 +215,7 @@ INTERNAL void wasapi_update_end(struct wasapi_buffer *wspbuf, M_PcmF32 src)
* Playback thread entry
* ========================== */
INTERNAL P_JobDef(playback_job, _)
internal P_JobDef(playback_job, _)
{
__prof;
(UNUSED)_;

View File

@ -1,4 +1,4 @@
#if defined(PROFILING) && PROFILING == 1
#if defined(ProfilingIsEnabled) && ProfilingIsEnabled == 1
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"

View File

@ -1,25 +1,25 @@
#if defined(PROFILING) && PROFILING == 1
#if defined(ProfilingIsEnabled) && ProfilingIsEnabled == 1
#ifndef __clang__
# error Only clang is supported when compiling with PROFILING=1 (cleanup attributes are required for profiling markup)
# error Only clang is supported when compiling with ProfilingIsEnabled=1 (cleanup attributes are required for profiling markup)
#endif
#define PROFILING_SYSTEM_TRACE 0
#define PROFILING_CAPTURE_FRAME_IMAGE 0
#define PROFILING_LOCKS 0
#define PROFILING_GPU 1
#define PROFILING_GPU_STABLE_POWER_STATE 1
#define ProfilingIsEnabled_SYSTEM_TRACE 0
#define ProfilingIsEnabled_CAPTURE_FRAME_IMAGE 0
#define ProfilingIsEnabled_LOCKS 0
#define ProfilingIsEnabled_GPU 1
#define ProfilingIsEnabled_GPU_STABLE_POWER_STATE 1
//#define PROFILER_THREAD_AFFINITY_MASK 0x000000000000F000ull
#define PROFILER_THREAD_AFFINITY_MASK 0
#define PROFILER_THREAD_PREFIX_WSTR L"Tracy"
#define PROFILING_FILE_WSTR L".tracy"
#define PROFILING_CMD_WSTR L"cmd /C start \"\" /wait tracy-capture.exe -o .tracy -a 127.0.0.1 && start \"\" tracy-profiler.exe .tracy"
//#define PROFILING_CMD_WSTR L"tracy-profiler.exe -a 127.0.0.1"
#define ProfilingIsEnabled_FILE_WSTR L".tracy"
#define ProfilingIsEnabled_CMD_WSTR L"cmd /C start \"\" /wait tracy-capture.exe -o .tracy -a 127.0.0.1 && start \"\" tracy-profiler.exe .tracy"
//#define ProfilingIsEnabled_CMD_WSTR L"tracy-profiler.exe -a 127.0.0.1"
/* Tracy defines */
#define TRACY_ENABLE
#define TRACY_FIBERS
#if !PROFILING_SYSTEM_TRACE
#if !ProfilingIsEnabled_SYSTEM_TRACE
# define TRACY_NO_CALLSTACK
# define TRACY_NO_SYSTEM_TRACING
#endif
@ -32,15 +32,15 @@
#pragma clang diagnostic ignored "-Wincompatible-pointer-types-discards-qualifiers"
#include TRACY_CLIENT_HEADER_PATH
INLINE void __prof_zone_cleanup_func(TracyCZoneCtx *ctx) { TracyCZoneEnd(*ctx) }
#define __profnc(name, color) static const struct ___tracy_source_location_data CAT(__tracy_source_location,__LINE__) = { (name), __func__, __FILE__, (uint32_t)__LINE__, BGR32(color) }; __attribute((cleanup(__prof_zone_cleanup_func))) TracyCZoneCtx __tracy_zone_ctx = ___tracy_emit_zone_begin( &CAT(__tracy_source_location,__LINE__), 1 )
Inline void __prof_zone_cleanup_func(TracyCZoneCtx *ctx) { TracyCZoneEnd(*ctx) }
#define __profnc(name, color) static const struct ___tracy_source_location_data Cat(__tracy_source_location,__LINE__) = { (name), __func__, __FILE__, (uint32_t)__LINE__, Bgr32(color) }; __attribute((cleanup(__prof_zone_cleanup_func))) TracyCZoneCtx __tracy_zone_ctx = ___tracy_emit_zone_begin( &Cat(__tracy_source_location,__LINE__), 1 )
#define __profn(name) __profnc(name, 0)
#define __prof __profnc(0, 0)
#define __profvalue(v) TracyCZoneValue(__tracy_zone_ctx, (v))
#define __profalloc(ptr, size) TracyCAlloc((ptr), (size))
#define __proffree(ptr) TracyCFree((ptr))
#define __profmsg(txt, len, col) TracyCMessageC((txt), (len), BGR32(col))
#define __profmsg(txt, len, col) TracyCMessageC((txt), (len), Bgr32(col))
#define __profframe(name) TracyCFrameMarkNamed((name))
#define __profthread(name, group_hint) TracyCSetThreadNameWithHint((name), (group_hint))
@ -50,16 +50,16 @@ enum __prof_plot_type {
__prof_plot_type_percentage = TracyPlotFormatPercentage,
__prof_plot_type_watt = TracyPlotFormatWatt
};
#define __prof_plot_init(name, type, step, fill, color) TracyCPlotConfig(name, type, step, fill, BGR32(color))
#define __prof_plot_init(name, type, step, fill, color) TracyCPlotConfig(name, type, step, fill, Bgr32(color))
#define __prof_plot(name, val) TracyCPlot(name, val)
#define __prof_plot_i(name, val) TracyCPlotI(name, val)
#define __prof_is_connected() ___tracy_connected()
#else
#define PROFILING_CAPTURE_FRAME_IMAGE 0
#define PROFILING_LOCKS 0
#define PROFILING_GPU 0
#define ProfilingIsEnabled_CAPTURE_FRAME_IMAGE 0
#define ProfilingIsEnabled_LOCKS 0
#define ProfilingIsEnabled_GPU 0
#define __profnc(name, color)
#define __profn(name)
@ -76,9 +76,9 @@ enum __prof_plot_type {
#define __prof_plot_i(name, val)
#define __prof_is_connected() 0
#endif /* PROFILING */
#endif /* ProfilingIsEnabled */
#if PROFILING_LOCKS
#if ProfilingIsEnabled_LOCKS
# define __proflock_ctx(name) struct TracyCSharedLockCtx *name
# define __proflock_alloc(ctx) TracyCSharedLockAnnounce((ctx))
# define __proflock_release(ctx) TracyCSharedLockTerminate((ctx))
@ -105,19 +105,19 @@ enum __prof_plot_type {
# define __proflock_after_try_shared_lock(ctx, acquired)
# define __proflock_mark(ctx)
# define __proflock_custom_name(ctx, name, len)
#endif /* PROFILING && PROFILING_LOCKS */
#endif /* ProfilingIsEnabled && ProfilingIsEnabled_LOCKS */
#if PROFILING_GPU
#if ProfilingIsEnabled_GPU
/* Dx11 */
INLINE void __prof_dx11_zone_cleanup_func(TracyCD3D11ZoneCtx *ctx) { ___tracy_d3d11_emit_zone_end(*ctx); }
# define __profnc_dx11(dx11_ctx, name, color) static const struct ___tracy_source_location_data CAT(__tracy_gpu_d3d11_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, BGR32(color) }; __attribute((cleanup(__prof_dx11_zone_cleanup_func))) TracyCD3D11ZoneCtx __tracy_d3d11_zone_ctx; ___tracy_d3d11_emit_zone_begin( dx11_ctx, &__tracy_d3d11_zone_ctx, &CAT(__tracy_gpu_d3d11_source_location,__LINE__), 1)
Inline void __prof_dx11_zone_cleanup_func(TracyCD3D11ZoneCtx *ctx) { ___tracy_d3d11_emit_zone_end(*ctx); }
# define __profnc_dx11(dx11_ctx, name, color) static const struct ___tracy_source_location_data Cat(__tracy_gpu_d3d11_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, Bgr32(color) }; __attribute((cleanup(__prof_dx11_zone_cleanup_func))) TracyCD3D11ZoneCtx __tracy_d3d11_zone_ctx; ___tracy_d3d11_emit_zone_begin( dx11_ctx, &__tracy_d3d11_zone_ctx, &Cat(__tracy_gpu_d3d11_source_location,__LINE__), 1)
# define __prof_dx11_ctx(name) struct TracyCD3D11Ctx *name
# define __prof_dx11_ctx_alloc(ctx, device, device_ctx, name, name_len) ctx = ___tracy_d3d11_context_announce(device, device_ctx, name, name_len)
# define __prof_dx11_ctx_release(ctx) ___tracy_d3d11_context_terminate(ctx)
# define __prof_dx11_collect(ctx) ___tracy_d3d11_context_collect(ctx)
/* Dx12 */
INLINE void __prof_dx12_zone_cleanup_func(TracyCD3D12ZoneCtx *ctx) { ___tracy_d3d12_emit_zone_end(*ctx); }
# define __profnc_dx12(dx12_ctx, cmd_list, name, color) static const struct ___tracy_source_location_data CAT(__tracy_gpu_d3d12_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, BGR32(color) }; __attribute((cleanup(__prof_dx12_zone_cleanup_func))) TracyCD3D12ZoneCtx __tracy_d3d12_zone_ctx; ___tracy_d3d12_emit_zone_begin( dx12_ctx, cmd_list, &__tracy_d3d12_zone_ctx, &CAT(__tracy_gpu_d3d12_source_location,__LINE__), 1)
Inline void __prof_dx12_zone_cleanup_func(TracyCD3D12ZoneCtx *ctx) { ___tracy_d3d12_emit_zone_end(*ctx); }
# define __profnc_dx12(dx12_ctx, cmd_list, name, color) static const struct ___tracy_source_location_data Cat(__tracy_gpu_d3d12_source_location,__LINE__) = { name, __func__, __FILE__, (uint32_t)__LINE__, Bgr32(color) }; __attribute((cleanup(__prof_dx12_zone_cleanup_func))) TracyCD3D12ZoneCtx __tracy_d3d12_zone_ctx; ___tracy_d3d12_emit_zone_begin( dx12_ctx, cmd_list, &__tracy_d3d12_zone_ctx, &Cat(__tracy_gpu_d3d12_source_location,__LINE__), 1)
# define __prof_dx12_ctx(name) struct TracyCD3D12Ctx *name
# define __prof_dx12_ctx_alloc(ctx, device, queue, name, name_len) ctx = ___tracy_d3d12_context_announce(device, queue, name, name_len)
# define __prof_dx12_ctx_release(ctx) ___tracy_d3d12_context_terminate(ctx)
@ -133,19 +133,19 @@ INLINE void __prof_dx12_zone_cleanup_func(TracyCD3D12ZoneCtx *ctx) { ___tracy_d3
# define __prof_dx12_ctx_release(ctx)
# define __prof_dx12_new_frame(ctx)
# define __prof_dx12_collect(ctx)
#endif /* PROFILING_GPU */
#endif /* ProfilingIsEnabled_GPU */
#if PROFILING_CAPTURE_FRAME_IMAGE
#if ProfilingIsEnabled_CAPTURE_FRAME_IMAGE
# define __profframeimage(image, width, height, offset, flipped) TracyCFrameImage((image), (width), (height), (offset), (flipped))
#else
# define __profframeimage(image, width, height, offset, flipped)
#endif /* PROFILING_CAPTURE_FRAME_IMAGE */
#endif /* ProfilingIsEnabled_CAPTURE_FRAME_IMAGE */
#ifdef TRACY_FIBERS
/* Tracy fiber methods are wrapped in FORCE_NO_INLINE because otherwise issues can arise
/* Tracy fiber methods are wrapped in ForceNoInline because otherwise issues can arise
* accross fiber context boundaries during optimization */
FORCE_NO_INLINE INLINE void __prof_fiber_enter(char *fiber_name, i32 profiler_group) { TracyCFiberEnterWithHint(fiber_name, profiler_group); }
FORCE_NO_INLINE INLINE void __prof_fiber_leave(void) { TracyCFiberLeave; }
ForceNoInline Inline void __prof_fiber_enter(char *fiber_name, i32 profiler_group) { TracyCFiberEnterWithHint(fiber_name, profiler_group); }
ForceNoInline Inline void __prof_fiber_leave(void) { TracyCFiberLeave; }
#else
# define __prof_fiber_enter(fiber_name, profiler_group)
# define __prof_fiber_leave()

View File

@ -4,13 +4,13 @@
/* Add resource data to binary */
GLOBAL struct {
Global struct {
Arena *arena;
#if RESOURCES_EMBEDDED
struct tar_archive archive;
#endif
} G = ZI, DEBUG_ALIAS(G, G_resource);
} G = ZI, DebugAlias(G, G_resource);
/* ========================== *
* Startup
@ -19,7 +19,7 @@ GLOBAL struct {
R_StartupReceipt resource_startup(void)
{
__prof;
G.arena = AllocArena(GIBI(64));
G.arena = AllocArena(Gibi(64));
#if RESOURCES_EMBEDDED
String embedded_data = inc_res_tar();
@ -88,7 +88,7 @@ R_Resource resource_open(String name)
res._name_len = name.len;
MEMCPY(res._name_text, name.text, name.len);
} else {
ASSERT(0);
Assert(0);
}
return res;
#endif

View File

@ -32,30 +32,30 @@
* Startup
* ========================== */
GLOBAL struct {
Global struct {
Arena *nil_arena;
ClientStore *nil_client_store;
Client *nil_client;
Snapshot *nil_snapshot;
Ent *nil_ent;
} G = ZI, DEBUG_ALIAS(G, G_sim);
} G = ZI, DebugAlias(G, G_sim);
/* Accessed via `sim_client_store_nil()` */
READONLY ClientStore **_g_sim_client_store_nil = &G.nil_client_store;
Readonly ClientStore **_g_sim_client_store_nil = &G.nil_client_store;
/* Accessed via `sim_client_nil()` */
READONLY Client **_g_sim_client_nil = &G.nil_client;
Readonly Client **_g_sim_client_nil = &G.nil_client;
/* Accessed via `sim_snapshot_nil()` */
READONLY Snapshot **_g_sim_snapshot_nil = &G.nil_snapshot;
Readonly Snapshot **_g_sim_snapshot_nil = &G.nil_snapshot;
/* Accessed via `sim_ent_nil()` */
READONLY Ent **_g_sim_ent_nil = &G.nil_ent;
Readonly Ent **_g_sim_ent_nil = &G.nil_ent;
SimStartupReceipt sim_startup(void)
{
__prof;
G.nil_arena = AllocArena(GIBI(1));
G.nil_arena = AllocArena(Gibi(1));
/* Nil client store */
G.nil_client_store = PushStruct(G.nil_arena, ClientStore);
@ -83,7 +83,7 @@ SimStartupReceipt sim_startup(void)
G.nil_ent->mass_unscaled = 1;
G.nil_ent->inertia_unscaled = 1;
G.nil_ent->sprite_local_xform = XFORM_IDENT;
G.nil_ent->sprite_tint = COLOR_WHITE;
G.nil_ent->sprite_tint = ColorWhite;
/* Lock nil arena */
SetArenaReadonly(G.nil_arena);
@ -99,14 +99,14 @@ ClientStore *sim_client_store_alloc(void)
__prof;
ClientStore *store;
{
Arena *arena = AllocArena(GIBI(64));
Arena *arena = AllocArena(Gibi(64));
store = PushStruct(arena, ClientStore);
store->arena = arena;
}
store->valid = 1;
store->num_client_lookup_bins = CLIENT_LOOKUP_BINS;
store->client_lookup_bins = PushArray(store->arena, ClientLookupBin, store->num_client_lookup_bins);
store->clients_arena = AllocArena(GIBI(64));
store->clients_arena = AllocArena(Gibi(64));
store->clients = PushDry(store->clients_arena, Client);
return store;
}
@ -149,7 +149,7 @@ Client *sim_client_alloc(ClientStore *store)
client->valid = 1;
client->handle = handle;
client->snapshots_arena = AllocArena(GIBI(8));
client->snapshots_arena = AllocArena(Gibi(8));
client->num_snapshot_lookup_bins = TICK_LOOKUP_BINS;
client->snapshot_lookup_bins = PushArray(client->snapshots_arena, SnapshotLookupBin, client->num_snapshot_lookup_bins);
@ -187,7 +187,7 @@ void sim_client_release(Client *client)
* Client lookup
* ========================== */
INTERNAL u64 hash_from_channel_id(N_ChannelId channel_id)
internal u64 hash_from_channel_id(N_ChannelId channel_id)
{
return hash_fnv64(HASH_FNV64_BASIS, STRING_FROM_STRUCT(&channel_id));
}
@ -286,8 +286,8 @@ Snapshot *sim_snapshot_alloc(Client *client, Snapshot *src, u64 tick)
arena = ss->arena;
} else {
/* Arenas allocated here will be released with client */
arena = AllocArena(GIBI(1));
ents_arena = AllocArena(GIBI(1));
arena = AllocArena(Gibi(1));
ents_arena = AllocArena(Gibi(1));
}
}
ResetArena(arena);
@ -615,7 +615,7 @@ Snapshot *sim_snapshot_alloc_from_lerp(Client *client, Snapshot *ss0, Snapshot *
__prof;
/* New snapshot will be allocated with same tick as ss0 or ss1, so the result should go into a different client */
ASSERT(ss0->client != client && ss1->client != client);
Assert(ss0->client != client && ss1->client != client);
Snapshot *ss;
b32 should_blend = 1;
@ -632,7 +632,7 @@ Snapshot *sim_snapshot_alloc_from_lerp(Client *client, Snapshot *ss0, Snapshot *
if (!ss0->valid || !ss1->valid) {
/* New snapshot allocation caused one of the src snapshots original to release.
* ss0 & ss1 should be from a separate client than the allocating one. */
ASSERT(0);
Assert(0);
}
if (should_blend) {
@ -820,7 +820,7 @@ void sim_snapshot_decode(BB_Reader *br, Snapshot *ss)
}
} else {
/* Invalid bin index */
ASSERT(0);
Assert(0);
}
bin_changed = BB_ReadBit(br);
@ -847,7 +847,7 @@ void sim_snapshot_decode(BB_Reader *br, Snapshot *ss)
} else if (reserve_diff < 0) {
/* TODO: Handle this */
/* NOTE: Should be impossible for snasphot reserve count to decrease at the moment */
ASSERT(0);
Assert(0);
}
}
@ -983,7 +983,7 @@ void sim_snapshot_decode(BB_Reader *br, Snapshot *ss)
released = BB_ReadBit(br);
if (released) {
Ent *e = sim_ent_from_index(ss, e);
ASSERT(e->valid); /* An entity that we don't have allocated should never have been marked for release */
Assert(e->valid); /* An entity that we don't have allocated should never have been marked for release */
if (e->valid) {
sim_ent_enable_prop(e, SEPROP_RELEASE);
}
@ -1020,8 +1020,8 @@ void sim_snapshot_decode(BB_Reader *br, Snapshot *ss)
if (n->is_new) {
u32 index = n->index;
Ent *parent = sim_ent_from_index(ss, n->alloc_parent_index);
ASSERT(!sim_ent_from_index(ss, index)->valid && !sim_ent_from_id(ss, alloc_ent_id)->valid); /* An entity that we have allocated already should never be marked for allocation */
ASSERT(parent->valid); /* Parent for new entity allocation should always be valid */
Assert(!sim_ent_from_index(ss, index)->valid && !sim_ent_from_id(ss, alloc_ent_id)->valid); /* An entity that we have allocated already should never be marked for allocation */
Assert(parent->valid); /* Parent for new entity allocation should always be valid */
if (parent->valid && index < ss->num_ents_reserved) {
Ent *ent = &ss->ents[index];
ent->valid = 1;
@ -1029,7 +1029,7 @@ void sim_snapshot_decode(BB_Reader *br, Snapshot *ss)
sim_ent_link_parent(parent, ent);
} else {
/* Received an invalid entity allocation */
ASSERT(0);
Assert(0);
}
}
}
@ -1043,7 +1043,7 @@ void sim_snapshot_decode(BB_Reader *br, Snapshot *ss)
sim_ent_decode(&ent_br, e);
} else {
/* Received delta for unallocated ent */
ASSERT(0);
Assert(0);
}
}
#else
@ -1088,7 +1088,7 @@ void sim_snapshot_decode(BB_Reader *br, Snapshot *ss)
if (!allocated) {
/* Why is an already released ent being marked as released? */
ASSERT(e->valid);
Assert(e->valid);
if (e->valid) {
sim_ent_enable_prop(e, SEPROP_RELEASE);
}

View File

@ -52,9 +52,9 @@ Struct(ClientStore) {
u64 num_clients_reserved;
};
INLINE ClientStore *sim_client_store_nil(void)
Inline ClientStore *sim_client_store_nil(void)
{
extern READONLY ClientStore **_g_sim_client_store_nil;
extern Readonly ClientStore **_g_sim_client_store_nil;
return *_g_sim_client_store_nil;
}
@ -112,13 +112,13 @@ Struct(Client) {
SnapshotLookupBin *snapshot_lookup_bins;
};
INLINE Client *sim_client_nil(void)
Inline Client *sim_client_nil(void)
{
extern READONLY Client **_g_sim_client_nil;
extern Readonly Client **_g_sim_client_nil;
return *_g_sim_client_nil;
}
INLINE b32 sim_client_handle_eq(ClientHandle a, ClientHandle b)
Inline b32 sim_client_handle_eq(ClientHandle a, ClientHandle b)
{
return a.gen == b.gen && a.idx == b.idx;
}
@ -177,7 +177,7 @@ typedef i32 TileKind; enum {
NUM_SIM_TILE_KINDS
};
STATIC_ASSERT(NUM_SIM_TILE_KINDS < 256); /* Tile kind must fit in 8 bits */
StaticAssert(NUM_SIM_TILE_KINDS < 256); /* Tile kind must fit in 8 bits */
Struct(Snapshot) {
b32 valid;
@ -216,9 +216,9 @@ Struct(Snapshot) {
u32 num_ents_reserved;
};
INLINE Snapshot *sim_snapshot_nil(void)
Inline Snapshot *sim_snapshot_nil(void)
{
extern READONLY Snapshot **_g_sim_snapshot_nil;
extern Readonly Snapshot **_g_sim_snapshot_nil;
return *_g_sim_snapshot_nil;
}

View File

@ -3,12 +3,12 @@
#define SIM_ENT_COLLISION_DEBUG_BASIS_UID (MakeUID(0x302c01182013bb02, 0x570bd270399d11a5))
#define SIM_ENT_TILE_CHUNK_BASIS_UID (MakeUID(0x3ce42de071dd226b, 0x9b566f7df30c813a))
INTERNAL u32 index_from_ent(Snapshot *ss, Ent *ent)
internal u32 index_from_ent(Snapshot *ss, Ent *ent)
{
return ent - ss->ents;
}
INTERNAL Ent *ent_from_index(Snapshot *ss, u32 index)
internal Ent *ent_from_index(Snapshot *ss, u32 index)
{
if (index > 0 && index < ss->num_ents_reserved) {
return &ss->ents[index];
@ -23,9 +23,9 @@ INTERNAL Ent *ent_from_index(Snapshot *ss, u32 index)
Ent *sim_ent_alloc_raw(Snapshot *ss, Ent *parent, EntId id)
{
ASSERT(parent->valid);
ASSERT(ss->valid);
ASSERT(ss == parent->ss);
Assert(parent->valid);
Assert(ss->valid);
Assert(ss == parent->ss);
Ent *ent;
if (ss->first_free_ent > 0 && ss->first_free_ent < ss->num_ents_reserved) {
/* Reuse from free list */
@ -168,7 +168,7 @@ void sim_ent_activate(Ent *ent, u64 current_tick)
* Ent id
* ========================== */
INTERNAL EntBin *bin_from_id(Snapshot *ss, EntId id)
internal EntBin *bin_from_id(Snapshot *ss, EntId id)
{
return &ss->id_bins[id.uid.lo % ss->num_id_bins];
}
@ -201,7 +201,7 @@ void sim_ent_set_id(Ent *ent, EntId id)
}
/* Old id not in bin, this should be impossible. */
ASSERT(search->valid);
Assert(search->valid);
if (prev->valid) {
prev->next_in_id_bin = next_index;
@ -218,11 +218,11 @@ void sim_ent_set_id(Ent *ent, EntId id)
/* Insert new id into lookup */
if (!sim_ent_id_is_nil(id)) {
#if RTC
#if RtcIsEnabled
{
Ent *existing = sim_ent_from_id(ss, id);
/* Collision should be extremely unlikely under normal circumstances, there's probably a logic error somewhere. */
ASSERT(!existing->valid);
Assert(!existing->valid);
}
#endif
@ -400,7 +400,7 @@ void sim_ent_unlink_from_parent(Ent *ent)
* Ent xform
* ========================== */
INTERNAL void sim_ent_mark_child_xforms_dirty(Snapshot *ss, Ent *ent)
internal void sim_ent_mark_child_xforms_dirty(Snapshot *ss, Ent *ent)
{
for (Ent *child = sim_ent_from_id(ss, ent->first); child->valid; child = sim_ent_from_id(ss, child->next)) {
if (child->_is_xform_dirty) {
@ -412,7 +412,7 @@ INTERNAL void sim_ent_mark_child_xforms_dirty(Snapshot *ss, Ent *ent)
}
}
INTERNAL Xform sim_ent_get_xform_internal(Snapshot *ss, Ent *ent)
internal Xform sim_ent_get_xform_internal(Snapshot *ss, Ent *ent)
{
Xform xf;
if (ent->_is_xform_dirty) {
@ -649,7 +649,7 @@ void sim_ent_sync(Ent *local, Ent *remote)
MEMCPY_STRUCT(local, remote);
/* Why would 2 ents w/ different uids ever be synced? */
ASSERT(sim_ent_id_eq(local->id, old.id));
Assert(sim_ent_id_eq(local->id, old.id));
local->ss = old.ss;
local->id = old.id;

View File

@ -410,9 +410,9 @@ Struct(EntBin) {
* Nil
* ========================== */
INLINE Ent *sim_ent_nil(void)
Inline Ent *sim_ent_nil(void)
{
extern READONLY Ent **_g_sim_ent_nil;
extern Readonly Ent **_g_sim_ent_nil;
return *_g_sim_ent_nil;
}
@ -420,12 +420,12 @@ INLINE Ent *sim_ent_nil(void)
* Id helpers
* ========================== */
INLINE b32 sim_ent_id_eq(EntId a, EntId b)
Inline b32 sim_ent_id_eq(EntId a, EntId b)
{
return uid_eq(a.uid, b.uid);
}
INLINE b32 sim_ent_id_is_nil(EntId id)
Inline b32 sim_ent_id_is_nil(EntId id)
{
return uid_eq(id.uid, SIM_ENT_NIL_ID.uid);
}
@ -434,43 +434,43 @@ INLINE b32 sim_ent_id_is_nil(EntId id)
* Property helpers
* ========================== */
INLINE void sim_ent_enable_prop(Ent *ent, EntProp prop)
Inline void sim_ent_enable_prop(Ent *ent, EntProp prop)
{
u64 index = prop / 64;
u64 bit = prop % 64;
ent->props[index] |= ((u64)1 << bit);
}
INLINE void sim_ent_disable_prop(Ent *ent, EntProp prop)
Inline void sim_ent_disable_prop(Ent *ent, EntProp prop)
{
u64 index = prop / 64;
u64 bit = prop % 64;
ent->props[index] &= ~((u64)1 << bit);
}
INLINE b32 sim_ent_has_prop(Ent *ent, EntProp prop)
Inline b32 sim_ent_has_prop(Ent *ent, EntProp prop)
{
u64 index = prop / 64;
u64 bit = prop % 64;
return !!(ent->props[index] & ((u64)1 << bit));
}
INLINE b32 sim_ent_is_valid_and_active(Ent *ent)
Inline b32 sim_ent_is_valid_and_active(Ent *ent)
{
return ent->valid && sim_ent_has_prop(ent, SEPROP_ACTIVE);
}
INLINE b32 sim_ent_should_predict(Ent *ent)
Inline b32 sim_ent_should_predict(Ent *ent)
{
return sim_ent_id_eq(ent->predictor, ent->ss->local_player);
}
INLINE b32 sim_ent_is_owner(Ent *ent)
Inline b32 sim_ent_is_owner(Ent *ent)
{
return sim_ent_id_eq(ent->owner, ent->ss->local_player);
}
INLINE b32 sim_ent_should_simulate(Ent *ent)
Inline b32 sim_ent_should_simulate(Ent *ent)
{
b32 res = 0;
if (sim_ent_is_valid_and_active(ent)) {

View File

@ -5,7 +5,7 @@
* Contact
* ========================== */
INTERNAL b32 can_contact(Ent *e0, Ent *e1)
internal b32 can_contact(Ent *e0, Ent *e1)
{
b32 res = 0;
res = e0 != e1 &&
@ -84,8 +84,8 @@ void phys_create_and_update_contacts(PhysStepCtx *ctx, f32 elapsed_dt, u64 phys_
CLD_CollisionResult collider_res = collider_collision_points(&e0_collider, &e1_collider, e0_xf, e1_xf);
/* Parts of algorithm are hard-coded to support 2 contact points */
STATIC_ASSERT(countof(constraint_ent->contact_constraint_data.points) == 2);
STATIC_ASSERT(countof(collider_res.points) == 2);
StaticAssert(countof(constraint_ent->contact_constraint_data.points) == 2);
StaticAssert(countof(collider_res.points) == 2);
ContactConstraint *constraint = 0;
if (collider_res.num_points > 0) {
@ -155,7 +155,7 @@ void phys_create_and_update_contacts(PhysStepCtx *ctx, f32 elapsed_dt, u64 phys_
contact->vcp1 = v2_sub(point, e1_xf.og);
contact->starting_separation = sep;
#if DEVELOPER
#if DeveloperIsEnabled
contact->dbg_pt = point;
#endif
}
@ -1086,7 +1086,7 @@ void phys_solve_weld_joints(PhysStepCtx *ctx, f32 dt)
* Integration
* ========================== */
INTERNAL Xform get_derived_xform(Ent *ent, f32 dt)
internal Xform get_derived_xform(Ent *ent, f32 dt)
{
Xform xf = sim_ent_get_xform(ent);

View File

@ -46,7 +46,7 @@ struct ContactPoint {
f32 inv_tangent_mass;
/* Debugging */
#if DEVELOPER
#if DeveloperIsEnabled
V2 dbg_pt;
#endif
};

View File

@ -4,9 +4,9 @@
#define SPACE_ENTRIES_OFFSET (sizeof(Space) + (sizeof(Space) % alignof(SpaceEntry)))
/* Accessed via sim_ent_nil() */
READONLY SpaceEntry _g_space_entry_nil = { .valid = 0 };
READONLY SpaceCell _g_space_cell_nil = { .valid = 0 };
READONLY Space _g_space_nil = { .valid = 0 };
Readonly SpaceEntry _g_space_entry_nil = { .valid = 0 };
Readonly SpaceCell _g_space_cell_nil = { .valid = 0 };
Readonly Space _g_space_nil = { .valid = 0 };
/* ========================== *
* Space alloc
@ -19,7 +19,7 @@ Space *space_alloc(f32 cell_size, u32 num_bins_sqrt)
{
Space *space;
{
Arena *arena = AllocArena(GIBI(64));
Arena *arena = AllocArena(Gibi(64));
space = PushStruct(arena, Space);
space->entry_arena = arena;
}
@ -27,7 +27,7 @@ Space *space_alloc(f32 cell_size, u32 num_bins_sqrt)
space->valid = 1;
space->entries = PushDry(space->entry_arena, SpaceEntry);
space->cell_arena = AllocArena(GIBI(64));
space->cell_arena = AllocArena(Gibi(64));
space->cell_size = cell_size;
space->num_bins = num_bins_sqrt * num_bins_sqrt;
space->num_bins_sqrt = num_bins_sqrt;
@ -58,7 +58,7 @@ Space *space_from_entry(SpaceEntry *entry)
if (entry->valid) {
u64 first_entry_addr = (u64)(entry - entry->handle.idx);
Space *space = (Space *)(first_entry_addr - SPACE_ENTRIES_OFFSET);
ASSERT(space->entries == (SpaceEntry *)first_entry_addr);
Assert(space->entries == (SpaceEntry *)first_entry_addr);
return space;
} else {
return space_nil();
@ -69,7 +69,7 @@ Space *space_from_entry(SpaceEntry *entry)
* Cell
* ========================== */
INTERNAL V2i32 world_to_cell_coords(f32 cell_size, V2 world_pos)
internal V2i32 world_to_cell_coords(f32 cell_size, V2 world_pos)
{
f32 x = world_pos.x;
f32 y = world_pos.y;
@ -78,12 +78,12 @@ INTERNAL V2i32 world_to_cell_coords(f32 cell_size, V2 world_pos)
return V2i32FromXY((i32)x, (i32)y);
}
INTERNAL i32 cell_coords_to_bin_index(Space *space, V2i32 cell_pos)
internal i32 cell_coords_to_bin_index(Space *space, V2i32 cell_pos)
{
i32 num_bins_sqrt = space->num_bins_sqrt;
/* Cell pos of 0 is not valid and will be converted to -1 */
ASSERT(cell_pos.x != 0 && cell_pos.y != 0);
Assert(cell_pos.x != 0 && cell_pos.y != 0);
i32 index_x = cell_pos.x;
i32 index_y = cell_pos.y;
@ -95,7 +95,7 @@ INTERNAL i32 cell_coords_to_bin_index(Space *space, V2i32 cell_pos)
index_y += (index_y < 0) * (num_bins_sqrt * ((index_y / -num_bins_sqrt) + 1));
i32 bin_index = (index_x % num_bins_sqrt) + (index_y % num_bins_sqrt) * num_bins_sqrt;
ASSERT(bin_index >= 0 && bin_index < (i32)space->num_bins);
Assert(bin_index >= 0 && bin_index < (i32)space->num_bins);
return bin_index;
}
@ -114,7 +114,7 @@ SpaceCell *space_get_cell(Space *space, V2i32 cell_pos)
return res;
}
INTERNAL void space_cell_node_alloc(V2i32 cell_pos, SpaceEntry *entry)
internal void space_cell_node_alloc(V2i32 cell_pos, SpaceEntry *entry)
{
Space *space = space_from_entry(entry);
i32 bin_index = cell_coords_to_bin_index(space, cell_pos);
@ -183,7 +183,7 @@ INTERNAL void space_cell_node_alloc(V2i32 cell_pos, SpaceEntry *entry)
entry->last_node = node;
}
INTERNAL void space_cell_node_release(SpaceCellNode *n)
internal void space_cell_node_release(SpaceCellNode *n)
{
SpaceCell *cell = n->cell;
SpaceEntry *entry = n->entry;
@ -389,7 +389,7 @@ SpaceEntry *space_iter_next(SpaceIter *iter)
SpaceCellNode *next_node = 0;
if (cell_cur.x >= cell_start.x && cell_cur.x <= cell_end.x && cell_cur.y >= cell_start.y && cell_cur.y <= cell_end.y) {
/* Started */
ASSERT(iter->prev != 0);
Assert(iter->prev != 0);
next_node = iter->prev->next_in_cell;
} else if (cell_cur.x > cell_end.x || cell_cur.y > cell_end.y) {
/* Ended */

View File

@ -92,21 +92,21 @@ struct SpaceIter {
* Nil
* ========================== */
INLINE SpaceEntry *space_entry_nil(void)
Inline SpaceEntry *space_entry_nil(void)
{
extern READONLY SpaceEntry _g_space_entry_nil;
extern Readonly SpaceEntry _g_space_entry_nil;
return &_g_space_entry_nil;
}
INLINE SpaceCell *space_cell_nil(void)
Inline SpaceCell *space_cell_nil(void)
{
extern READONLY SpaceCell _g_space_cell_nil;
extern Readonly SpaceCell _g_space_cell_nil;
return &_g_space_cell_nil;
}
INLINE Space *space_nil(void)
Inline Space *space_nil(void)
{
extern READONLY Space _g_space_nil;
extern Readonly Space _g_space_nil;
return &_g_space_nil;
}

View File

@ -33,7 +33,7 @@ void sim_accel_reset(Snapshot *ss, SimAccel *accel)
/* TODO: Remove this */
INTERNAL Ent *test_spawn_smg(Ent *parent)
internal Ent *test_spawn_smg(Ent *parent)
{
Ent *e = sim_ent_alloc_sync_src(parent);
e->sprite = sprite_tag_from_path(LIT("sprite/gun.ase"));
@ -49,7 +49,7 @@ INTERNAL Ent *test_spawn_smg(Ent *parent)
return e;
}
INTERNAL Ent *test_spawn_launcher(Ent *parent)
internal Ent *test_spawn_launcher(Ent *parent)
{
Ent *e = sim_ent_alloc_sync_src(parent);
e->sprite = sprite_tag_from_path(LIT("sprite/gun.ase"));
@ -65,7 +65,7 @@ INTERNAL Ent *test_spawn_launcher(Ent *parent)
return e;
}
INTERNAL Ent *test_spawn_chucker(Ent *parent)
internal Ent *test_spawn_chucker(Ent *parent)
{
Ent *chucker = sim_ent_alloc_sync_src(parent);
chucker->sprite = sprite_tag_from_path(LIT("sprite/gun.ase"));
@ -100,7 +100,7 @@ INTERNAL Ent *test_spawn_chucker(Ent *parent)
return chucker;
}
INTERNAL Ent *test_spawn_employee(Ent *parent)
internal Ent *test_spawn_employee(Ent *parent)
{
/* Player */
Ent *employee = sim_ent_nil();
@ -184,7 +184,7 @@ INTERNAL Ent *test_spawn_employee(Ent *parent)
return employee;
}
INTERNAL Ent *test_spawn_camera(Ent *parent, Ent *follow)
internal Ent *test_spawn_camera(Ent *parent, Ent *follow)
{
Ent *camera_ent = sim_ent_nil();
if (follow->valid) {
@ -203,7 +203,7 @@ INTERNAL Ent *test_spawn_camera(Ent *parent, Ent *follow)
return camera_ent;
}
INTERNAL Ent *test_spawn_explosion(Ent *parent, V2 pos, f32 strength, f32 radius)
internal Ent *test_spawn_explosion(Ent *parent, V2 pos, f32 strength, f32 radius)
{
Ent *ent = sim_ent_alloc_sync_src(parent);
sim_ent_set_xform(ent, XFORM_POS(pos));
@ -219,7 +219,7 @@ INTERNAL Ent *test_spawn_explosion(Ent *parent, V2 pos, f32 strength, f32 radius
return ent;
}
INTERNAL void test_teleport(Ent *ent, V2 pos)
internal void test_teleport(Ent *ent, V2 pos)
{
//++ent->continuity_gen;
Xform xf = sim_ent_get_xform(ent);
@ -227,7 +227,7 @@ INTERNAL void test_teleport(Ent *ent, V2 pos)
sim_ent_set_xform(ent, xf);
}
INTERNAL void test_spawn_entities1(Ent *parent, V2 pos)
internal void test_spawn_entities1(Ent *parent, V2 pos)
{
(UNUSED)pos;
@ -240,7 +240,7 @@ INTERNAL void test_spawn_entities1(Ent *parent, V2 pos)
}
}
INTERNAL void test_spawn_entities2(Ent *parent, V2 pos)
internal void test_spawn_entities2(Ent *parent, V2 pos)
{
(UNUSED)pos;
@ -258,8 +258,8 @@ INTERNAL void test_spawn_entities2(Ent *parent, V2 pos)
e->sprite = sprite_tag_from_path(LIT("sprite/tile.ase"));
e->layer = SIM_LAYER_SHOULDERS;
//e->sprite_tint = ALPHA32_F(COLOR_BLUE, 0.75);
//e->sprite_tint = ALPHA32_F(COLOR_WHITE, 1);
//e->sprite_tint = Alpha32F(ColorBlue, 0.75);
//e->sprite_tint = Alpha32F(ColorWhite, 1);
sim_ent_enable_prop(e, SEPROP_SOLID);
Quad collider_quad = quad_from_rect(RECT(-0.5, -0.5, 1, 1));
@ -274,7 +274,7 @@ INTERNAL void test_spawn_entities2(Ent *parent, V2 pos)
f32 g = rand_f64_from_state(&rand, 1, 5);
f32 b = rand_f64_from_state(&rand, 1, 5);
e->sprite_emittance = V3FromXYZ(r, g, b);
e->sprite_tint = RGBA32_F(r / 5, g / 5, b / 5, 1);
e->sprite_tint = Rgba32F(r / 5, g / 5, b / 5, 1);
}
sim_ent_enable_prop(e, SEPROP_DYNAMIC);
@ -311,7 +311,7 @@ INTERNAL void test_spawn_entities2(Ent *parent, V2 pos)
#endif
}
INTERNAL void test_spawn_entities3(Ent *parent, V2 pos)
internal void test_spawn_entities3(Ent *parent, V2 pos)
{
(UNUSED)pos;
@ -328,7 +328,7 @@ INTERNAL void test_spawn_entities3(Ent *parent, V2 pos)
e->sprite = sprite_tag_from_path(LIT("sprite/box.ase"));
e->layer = SIM_LAYER_SHOULDERS;
e->sprite_tint = COLOR_RED;
e->sprite_tint = ColorRed;
sim_ent_enable_prop(e, SEPROP_SOLID);
Quad collider_quad = quad_from_rect(RECT(-0.5, -0.5, 1, 1));
@ -336,7 +336,7 @@ INTERNAL void test_spawn_entities3(Ent *parent, V2 pos)
}
}
INTERNAL void test_spawn_entities4(Ent *parent, V2 pos)
internal void test_spawn_entities4(Ent *parent, V2 pos)
{
(UNUSED)pos;
@ -355,10 +355,10 @@ INTERNAL void test_spawn_entities4(Ent *parent, V2 pos)
sim_ent_enable_prop(e, SEPROP_LIGHT_TEST);
e->sprite_emittance = V3FromXYZ(2, 2, 2);
e->sprite_tint = RGB32_F(1, 1, 1);
e->sprite_tint = Rgb32F(1, 1, 1);
}
INTERNAL void test_spawn_tile(Snapshot *world, V2 world_pos)
internal void test_spawn_tile(Snapshot *world, V2 world_pos)
{
#if 0
Ent *e = sim_ent_alloc_sync_src(parent);
@ -379,7 +379,7 @@ INTERNAL void test_spawn_tile(Snapshot *world, V2 world_pos)
e->layer = SIM_LAYER_WALLS;
e->sprite = sprite_tag_from_path(LIT("sprite/tile.ase"));
e->sprite_tint = COLOR_RED;
e->sprite_tint = ColorRed;
{
S_Scope *scope = sprite_scope_begin();
@ -405,7 +405,7 @@ INTERNAL void test_spawn_tile(Snapshot *world, V2 world_pos)
INTERNAL SORT_COMPARE_FUNC_DEF(tile_chunk_sort_x, arg_a, arg_b, udata)
internal SORT_COMPARE_FUNC_DEF(tile_chunk_sort_x, arg_a, arg_b, udata)
{
(UNUSED)udata;
Ent *a = *(Ent **)arg_a;
@ -418,7 +418,7 @@ INTERNAL SORT_COMPARE_FUNC_DEF(tile_chunk_sort_x, arg_a, arg_b, udata)
return res;
}
INTERNAL SORT_COMPARE_FUNC_DEF(tile_chunk_sort_y, arg_a, arg_b, udata)
internal SORT_COMPARE_FUNC_DEF(tile_chunk_sort_y, arg_a, arg_b, udata)
{
(UNUSED)udata;
Ent *a = *(Ent **)arg_a;
@ -431,7 +431,7 @@ INTERNAL SORT_COMPARE_FUNC_DEF(tile_chunk_sort_y, arg_a, arg_b, udata)
return res;
}
INTERNAL void test_generate_walls(Snapshot *world)
internal void test_generate_walls(Snapshot *world)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
@ -675,7 +675,7 @@ INTERNAL void test_generate_walls(Snapshot *world)
wall_ent->local_collider.points[1] = v2_sub(end, start);
V2 dirs[4] = { V2FromXY(0, -1), V2FromXY(1, 0), V2FromXY(0, 1), V2FromXY(-1, 0) };
ASSERT(node->wall_dir >= 0 && (u32)node->wall_dir < countof(dirs));
Assert(node->wall_dir >= 0 && (u32)node->wall_dir < countof(dirs));
wall_ent->collision_dir = dirs[node->wall_dir];
sim_ent_activate(wall_ent, world->tick);
@ -695,7 +695,7 @@ INTERNAL void test_generate_walls(Snapshot *world)
INTERNAL void test_clear_level(SimStepCtx *ctx)
internal void test_clear_level(SimStepCtx *ctx)
{
Snapshot *world = ctx->world;
for (u64 j = 0; j < world->num_ents_reserved; ++j) {
@ -710,7 +710,7 @@ INTERNAL void test_clear_level(SimStepCtx *ctx)
* Respond to physics collisions
* ========================== */
INTERNAL PHYS_COLLISION_CALLBACK_FUNC_DEF(on_collision, data, step_ctx)
internal PHYS_COLLISION_CALLBACK_FUNC_DEF(on_collision, data, step_ctx)
{
Snapshot *world = step_ctx->world;
Ent *e0 = sim_ent_from_id(world, data->e0);
@ -751,7 +751,7 @@ INTERNAL PHYS_COLLISION_CALLBACK_FUNC_DEF(on_collision, data, step_ctx)
Xform xf = XFORM_TRS(.t = point, .r = rand_f64_from_state(&step_ctx->rand, 0, TAU));
Ent *decal = sim_ent_alloc_sync_src(root);
decal->sprite = sprite_tag_from_path(LIT("sprite/blood.ase"));
decal->sprite_tint = RGBA32_F(1, 1, 1, 0.25f);
decal->sprite_tint = Rgba32F(1, 1, 1, 0.25f);
decal->layer = SIM_LAYER_FLOOR_DECALS;
sim_ent_set_xform(decal, xf);
@ -858,7 +858,7 @@ void sim_step(SimStepCtx *ctx)
world->sim_dt_ns = max_i64(0, sim_dt_ns);
world->sim_time_ns += world->sim_dt_ns;
f32 sim_dt = SECONDS_FROM_NS(world->sim_dt_ns);
f32 sim_dt = SecondsFromNs(world->sim_dt_ns);
S_Scope *sprite_frame_scope = sprite_scope_begin();
@ -899,7 +899,7 @@ void sim_step(SimStepCtx *ctx)
if (is_master && player->valid) {
player->player_last_rtt_ns = client->last_rtt_ns;
player->player_average_rtt_seconds -= player->player_average_rtt_seconds / 200;
player->player_average_rtt_seconds += SECONDS_FROM_NS(client->last_rtt_ns) / 200;
player->player_average_rtt_seconds += SecondsFromNs(client->last_rtt_ns) / 200;
}
/* Sync ents from client */
@ -1101,7 +1101,7 @@ void sim_step(SimStepCtx *ctx)
default:
{
/* Invalid cmd kind */
ASSERT(0);
Assert(0);
} break;
}
@ -1176,10 +1176,10 @@ void sim_step(SimStepCtx *ctx)
{
S_SheetSpan span = sprite_sheet_get_span(sheet, ent->sprite_span_name);
if (ent->animation_last_frame_change_time_ns == 0) {
ent->animation_last_frame_change_time_ns = SECONDS_FROM_NS(world->sim_time_ns);
ent->animation_last_frame_change_time_ns = SecondsFromNs(world->sim_time_ns);
}
f64 time_in_frame = SECONDS_FROM_NS(world->sim_time_ns - ent->animation_last_frame_change_time_ns);
f64 time_in_frame = SecondsFromNs(world->sim_time_ns - ent->animation_last_frame_change_time_ns);
u64 frame_index = ent->animation_frame;
if (frame_index < span.start || frame_index > span.end) {
frame_index = span.start;
@ -1332,7 +1332,7 @@ void sim_step(SimStepCtx *ctx)
if (primary_triggered) {
i64 world_time_ns = world->sim_time_ns;
if ((world_time_ns - ent->last_primary_fire_ns >= NS_FROM_SECONDS(ent->primary_fire_delay)) || ent->last_primary_fire_ns == 0) {
if ((world_time_ns - ent->last_primary_fire_ns >= NsFromSeconds(ent->primary_fire_delay)) || ent->last_primary_fire_ns == 0) {
ent->last_primary_fire_ns = world_time_ns;
} else {
primary_triggered = 0;
@ -1340,7 +1340,7 @@ void sim_step(SimStepCtx *ctx)
}
if (secondary_triggered) {
i64 world_time_ns = world->sim_time_ns;
if ((world_time_ns - ent->last_secondary_fire_ns >= NS_FROM_SECONDS(ent->secondary_fire_delay)) || ent->last_secondary_fire_ns == 0) {
if ((world_time_ns - ent->last_secondary_fire_ns >= NsFromSeconds(ent->secondary_fire_delay)) || ent->last_secondary_fire_ns == 0) {
ent->last_secondary_fire_ns = world_time_ns;
} else {
secondary_triggered = 0;
@ -1596,7 +1596,7 @@ void sim_step(SimStepCtx *ctx)
}
f32 new_vel = 0;
if (!F32_IS_NAN(new_angle)) {
if (!IsF32Nan(new_angle)) {
const f32 angle_error_allowed = 0.001f;
Xform joint_xf = sim_ent_get_xform(joint_ent);
f32 diff = math_unwind_angle(new_angle - xform_get_rotation(joint_xf));

View File

@ -19,9 +19,9 @@ struct sound_task_params_store {
* Global state
* ========================== */
GLOBAL struct {
Global struct {
struct sound_task_params_store params;
} G = ZI, DEBUG_ALIAS(G, G_sound);
} G = ZI, DebugAlias(G, G_sound);
/* ========================== *
* Startup
@ -31,7 +31,7 @@ SND_StartupReceipt sound_startup(AC_StartupReceipt *asset_cache_sr)
{
__prof;
(UNUSED)asset_cache_sr;
G.params.arena = AllocArena(GIBI(64));
G.params.arena = AllocArena(Gibi(64));
return (SND_StartupReceipt) { 0 };
}
@ -39,7 +39,7 @@ SND_StartupReceipt sound_startup(AC_StartupReceipt *asset_cache_sr)
* Load task param store
* ========================== */
INTERNAL struct sound_task_params *sound_task_params_alloc(void)
internal struct sound_task_params *sound_task_params_alloc(void)
{
struct sound_task_params *p = 0;
{
@ -55,7 +55,7 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void)
return p;
}
INTERNAL void sound_task_params_release(struct sound_task_params *p)
internal void sound_task_params_release(struct sound_task_params *p)
{
P_Lock lock = P_LockE(&G.params.mutex);
p->next_free = G.params.head_free;
@ -67,7 +67,7 @@ INTERNAL void sound_task_params_release(struct sound_task_params *p)
* Load
* ========================== */
INTERNAL P_JobDef(sound_load_asset_job, job)
internal P_JobDef(sound_load_asset_job, job)
{
__prof;
struct sound_task_params *params = job.sig;
@ -81,7 +81,7 @@ INTERNAL P_JobDef(sound_load_asset_job, job)
String error_msg = LIT("Unknown error");
ASSERT(string_ends_with(path, LIT(".mp3")));
Assert(string_ends_with(path, LIT(".mp3")));
/* Decode */
MP3_Result decoded = ZI;
@ -105,22 +105,23 @@ INTERNAL P_JobDef(sound_load_asset_job, job)
if (decoded.success) {
/* Store */
SND_Sound *sound = 0;
u64 samples_count = decoded.samples_count;
i16 *samples = 0;
{
AC_Store store = asset_cache_store_open();
sound = PushStructNoZero(store.arena, SND_Sound);
samples = PushArrayNoZero(store.arena, i16, decoded.pcm.count);
samples = PushArrayNoZero(store.arena, i16, samples_count);
asset_cache_store_close(&store);
}
/* Initialize */
MEMZERO_STRUCT(sound);
sound->flags = flags;
sound->pcm.count = decoded.pcm.count;
sound->pcm.samples = samples;
MEMCPY(sound->pcm.samples, decoded.pcm.samples, decoded.pcm.count * sizeof(*decoded.pcm.samples));
sound->samples_count = samples_count;
sound->samples = samples;
MEMCPY(sound->samples, decoded.samples, decoded.samples_count * sizeof(*decoded.samples));
P_LogSuccessF("Loaded sound \"%F\" in %F seconds", FMT_STR(path), FMT_FLOAT(SECONDS_FROM_NS(P_TimeNs() - start_ns)));
P_LogSuccessF("Loaded sound \"%F\" in %F seconds", FMT_STR(path), FMT_FLOAT(SecondsFromNs(P_TimeNs() - start_ns)));
asset_cache_mark_ready(asset, sound);
} else {
P_LogErrorF("Error loading sound \"%F\": %F", FMT_STR(path), FMT_STR(error_msg));

View File

@ -4,7 +4,8 @@
typedef struct SND_Sound SND_Sound;
struct SND_Sound {
u32 flags;
PcmData pcm;
u64 samples_count;
i16 *samples;
};
typedef struct SND_StartupReceipt SND_StartupReceipt;

View File

@ -1,23 +1,23 @@
/* The evictor will begin evicting once cache usage is > threshold.
 * It will evict entries until the budget has shrunk < target. */
#define CACHE_MEMORY_BUDGET_THRESHOLD (MEBI(256))
#define CACHE_MEMORY_BUDGET_TARGET (MEBI(128))
STATIC_ASSERT(CACHE_MEMORY_BUDGET_THRESHOLD >= CACHE_MEMORY_BUDGET_TARGET);
#define CACHE_MEMORY_BUDGET_THRESHOLD (Mebi(256))
#define CACHE_MEMORY_BUDGET_TARGET (Mebi(128))
StaticAssert(CACHE_MEMORY_BUDGET_THRESHOLD >= CACHE_MEMORY_BUDGET_TARGET);
#define CACHE_BINS_COUNT 1024
#define MAX_SCOPE_REFERENCES 1024
/* How long between evictor cycles */
#define EVICTOR_CYCLE_INTERVAL_NS NS_FROM_SECONDS(0.500)
#define EVICTOR_CYCLE_INTERVAL_NS NsFromSeconds(0.500)
/* How many cycles a cache entry spends unused until it's considered evictable */
#define EVICTOR_GRACE_PERIOD_CYCLES (NS_FROM_SECONDS(10.000) / EVICTOR_CYCLE_INTERVAL_NS)
#define EVICTOR_GRACE_PERIOD_CYCLES (NsFromSeconds(10.000) / EVICTOR_CYCLE_INTERVAL_NS)
/* Texture arena only used to store texture struct at the moment. Actual image data is allocated on GPU. */
#define TEXTURE_ARENA_RESERVE MEBI(1)
#define TEXTURE_ARENA_RESERVE Mebi(1)
#define SHEET_ARENA_RESERVE MEBI(64)
#define SHEET_ARENA_RESERVE Mebi(64)
#define SHEET_SPAN_LOOKUP_TABLE_BIN_RATIO 2.0
#define SHEET_SLICE_LOOKUP_TABLE_BIN_RATIO 2.0
@ -43,7 +43,7 @@ struct cache_refcount {
i32 count; /* Number of scopes currently holding a reference to this entry */
i32 last_ref_cycle; /* Last evictor cycle that the refcount was modified */
};
STATIC_ASSERT(sizeof(struct cache_refcount) == 8); /* Must fit into 64 bit atomic */
StaticAssert(sizeof(struct cache_refcount) == 8); /* Must fit into 64 bit atomic */
struct cache_entry_hash {
u64 v;
@ -116,7 +116,7 @@ struct load_cmd {
* Global state
* ========================== */
GLOBAL struct {
Global struct {
Arena *perm_arena;
S_Texture *nil_texture;
S_Texture *loading_texture;
@ -142,13 +142,13 @@ GLOBAL struct {
b32 evictor_scheduler_shutdown;
P_Mutex evictor_scheduler_mutex;
P_Cv evictor_scheduler_shutdown_cv;
} G = ZI, DEBUG_ALIAS(G, G_sprite);
} G = ZI, DebugAlias(G, G_sprite);
/* ========================== *
* Purple-black image
* ========================== */
INTERNAL ImageDataRgba generate_purple_black_image(Arena *arena, u32 width, u32 height)
internal u32 *generate_purple_black_image(Arena *arena, u32 width, u32 height)
{
u32 *pixels = PushArrayNoZero(arena, u32, width * height);
@ -175,29 +175,25 @@ INTERNAL ImageDataRgba generate_purple_black_image(Arena *arena, u32 width, u32
}
}
return (ImageDataRgba) {
.width = width,
.height = height,
.pixels = pixels
};
return pixels;
}
/* ========================== *
* Startup
* ========================== */
INTERNAL P_ExitFuncDef(sprite_shutdown);
INTERNAL P_JobDef(sprite_load_job, arg);
INTERNAL P_JobDef(sprite_evictor_job, _);
internal P_ExitFuncDef(sprite_shutdown);
internal P_JobDef(sprite_load_job, arg);
internal P_JobDef(sprite_evictor_job, _);
#if RESOURCE_RELOADING
INTERNAL WATCH_CALLBACK_FUNC_DEF(sprite_watch_callback, info);
internal WATCH_CALLBACK_FUNC_DEF(sprite_watch_callback, info);
#endif
S_StartupReceipt sprite_startup(void)
{
__prof;
G.perm_arena = AllocArena(MEBI(1));
G.perm_arena = AllocArena(Mebi(1));
{
/* Init loading texture */
G.loading_texture = PushStruct(G.perm_arena, S_Texture);
@ -207,8 +203,10 @@ S_StartupReceipt sprite_startup(void)
G.nil_texture->loaded = 1;
{
TempArena scratch = BeginScratchNoConflict();
ImageDataRgba purple_black_image = generate_purple_black_image(scratch.arena, 64, 64);
G.nil_texture->gp_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2i32FromXY(purple_black_image.width, purple_black_image.height), purple_black_image.pixels);
u32 width = 64;
u32 height = 64;
u32 *pixels = generate_purple_black_image(scratch.arena, width, height);
G.nil_texture->gp_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2i32FromXY(width, height), pixels);
EndScratch(scratch);
}
@ -225,12 +223,12 @@ S_StartupReceipt sprite_startup(void)
}
SetArenaReadonly(G.perm_arena);
G.cache.arena = AllocArena(GIBI(64));
G.cache.arena = AllocArena(Gibi(64));
G.cache.bins = PushArray(G.cache.arena, struct cache_bin, CACHE_BINS_COUNT);
G.load_cmds_arena = AllocArena(GIBI(64));
G.load_cmds_arena = AllocArena(Gibi(64));
G.scopes_arena = AllocArena(GIBI(64));
G.scopes_arena = AllocArena(Gibi(64));
P_Run(1, sprite_evictor_job, 0, P_Pool_Background, P_Priority_Low, &G.shutdown_counter);
@ -240,14 +238,14 @@ S_StartupReceipt sprite_startup(void)
return (S_StartupReceipt) { 0 };
}
INTERNAL P_ExitFuncDef(sprite_shutdown)
internal P_ExitFuncDef(sprite_shutdown)
{
__prof;
/* Signal evictor shutdown */
{
P_Lock lock = P_LockE(&G.evictor_scheduler_mutex);
G.evictor_scheduler_shutdown = 1;
P_SignalCv(&G.evictor_scheduler_shutdown_cv, I32_MAX);
P_SignalCv(&G.evictor_scheduler_shutdown_cv, I32Max);
P_Unlock(&lock);
}
/* Wait for evictor shutdown */
@ -276,7 +274,7 @@ b32 sprite_tag_eq(S_Tag t1, S_Tag t2)
return t1.hash == t2.hash;
}
INTERNAL struct cache_entry_hash cache_entry_hash_from_tag_hash(u64 tag_hash, enum cache_entry_kind kind)
internal struct cache_entry_hash cache_entry_hash_from_tag_hash(u64 tag_hash, enum cache_entry_kind kind)
{
return (struct cache_entry_hash) { .v = rand_u64_from_seed(tag_hash + kind) };
}
@ -285,8 +283,8 @@ INTERNAL struct cache_entry_hash cache_entry_hash_from_tag_hash(u64 tag_hash, en
* Load
* ========================== */
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_ref(S_Scope *scope, struct cache_ref ref);
INTERNAL void push_load_job(struct cache_ref ref, S_Tag tag)
internal struct sprite_scope_cache_ref *scope_ensure_ref_from_ref(S_Scope *scope, struct cache_ref ref);
internal void push_load_job(struct cache_ref ref, S_Tag tag)
{
struct load_cmd *cmd = 0;
{
@ -315,7 +313,7 @@ INTERNAL void push_load_job(struct cache_ref ref, S_Tag tag)
P_Run(1, sprite_load_job, cmd, P_Pool_Background, P_Priority_Inherit, 0);
}
INTERNAL void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
internal void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
@ -328,8 +326,8 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
b32 success = 0;
i64 start_ns = P_TimeNs();
ASSERT(string_ends_with(path, LIT(".ase")));
ASSERT(e->kind == CACHE_ENTRY_KIND_TEXTURE);
Assert(string_ends_with(path, LIT(".ase")));
Assert(e->kind == CACHE_ENTRY_KIND_TEXTURE);
/* TODO: Replace arena allocs w/ buddy allocator */
/* TODO: Arena probably overkill. Just using it to store texture struct. */
@ -351,13 +349,13 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
if (decoded.success) {
/* Initialize */
e->texture = PushStruct(e->arena, S_Texture);
e->texture->width = decoded.image.width;
e->texture->height = decoded.image.height;
e->texture->width = decoded.width;
e->texture->height = decoded.height;
e->texture->valid = 1;
e->texture->loaded = 1;
e->texture->gp_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB, 0, V2i32FromXY(decoded.image.width, decoded.image.height), decoded.image.pixels);
e->texture->gp_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB, 0, V2i32FromXY(decoded.width, decoded.height), decoded.pixels);
/* TODO: Query gpu for more accurate texture size in VRAM */
memory_size += (decoded.image.width * decoded.image.height) * sizeof(*decoded.image.pixels);
memory_size += (decoded.width * decoded.height) * sizeof(*decoded.pixels);
success = 1;
}
}
@ -369,7 +367,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
P_LogSuccessF("Loaded sprite texture [%F] \"%F\" in %F seconds (cache size: %F bytes).",
FMT_HEX(e->hash.v),
FMT_STR(path),
FMT_FLOAT(SECONDS_FROM_NS(P_TimeNs() - start_ns)),
FMT_FLOAT(SecondsFromNs(P_TimeNs() - start_ns)),
FMT_UINT(e->memory_usage));
}
@ -392,12 +390,12 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, S_Tag tag)
EndScratch(scratch);
}
INTERNAL S_Sheet init_sheet_from_ase_result(Arena *arena, Ase_DecodedSheet ase)
internal S_Sheet init_sheet_from_ase_result(Arena *arena, Ase_DecodedSheet ase)
{
__prof;
S_Sheet sheet = ZI;
ASSERT(ase.num_frames >= 1);
Assert(ase.num_frames >= 1);
V2 frame_size = ase.frame_size;
V2 frame_center = v2_mul(ase.frame_size, 0.5f);
@ -486,7 +484,7 @@ INTERNAL S_Sheet init_sheet_from_ase_result(Arena *arena, Ase_DecodedSheet ase)
struct temp_ase_slice_key_node *node = PushStruct(scratch.arena, struct temp_ase_slice_key_node);
node->key = ase_slice_key;
node->next = temp_slice_group_node->temp_ase_slice_key_head;
node->earliest_frame = U32_MAX; /* To be overwritten later after iterating */
node->earliest_frame = U32Max; /* To be overwritten later after iterating */
temp_slice_group_node->temp_ase_slice_key_head = node;
@ -638,7 +636,7 @@ INTERNAL S_Sheet init_sheet_from_ase_result(Arena *arena, Ase_DecodedSheet ase)
return sheet;
}
INTERNAL void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
internal void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
@ -651,7 +649,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
b32 success = 0;
i64 start_ns = P_TimeNs();
ASSERT(e->kind == CACHE_ENTRY_KIND_SHEET);
Assert(e->kind == CACHE_ENTRY_KIND_SHEET);
/* TODO: Replace arena allocs w/ buddy allocator */
e->arena = AllocArena(SHEET_ARENA_RESERVE);
@ -690,7 +688,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
P_LogSuccessF("Loaded sprite sheet [%F] \"%F\" in %F seconds (cache size: %F bytes).",
FMT_HEX(e->hash.v),
FMT_STR(path),
FMT_FLOAT(SECONDS_FROM_NS(P_TimeNs() - start_ns)),
FMT_FLOAT(SecondsFromNs(P_TimeNs() - start_ns)),
FMT_UINT(e->memory_usage));
}
@ -717,7 +715,7 @@ INTERNAL void cache_entry_load_sheet(struct cache_ref ref, S_Tag tag)
* Scope
* ========================== */
INTERNAL void refcount_add(struct cache_entry *e, i32 amount)
internal void refcount_add(struct cache_entry *e, i32 amount)
{
i32 evictor_cycle = atomic32_fetch(&G.evictor_cycle.v);
Atomic64 *refcount_atomic = &e->refcount_struct.v;
@ -728,14 +726,14 @@ INTERNAL void refcount_add(struct cache_entry *e, i32 amount)
new_refcount.last_ref_cycle = evictor_cycle;
u64 v = atomic64_fetch_test_set(refcount_atomic, old_refcount_uncast, *(u64 *)&new_refcount);
if (v == old_refcount_uncast) {
ASSERT(new_refcount.count >= 0);
Assert(new_refcount.count >= 0);
break;
}
old_refcount_uncast = v;
}
}
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_unsafe(S_Scope *scope, struct cache_entry *e)
internal struct sprite_scope_cache_ref *scope_ensure_ref_unsafe(S_Scope *scope, struct cache_entry *e)
{
u64 bin_index = e->hash.v % CACHE_BINS_COUNT;
@ -768,14 +766,14 @@ INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_unsafe(S_Scope *scope,
return *slot;
}
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_entry(S_Scope *scope, struct cache_entry *e, P_Lock *bin_lock)
internal struct sprite_scope_cache_ref *scope_ensure_ref_from_entry(S_Scope *scope, struct cache_entry *e, P_Lock *bin_lock)
{
/* Guaranteed safe if caller has lock on entry's bin, since entry may not have an existing reference and could otherwise be evicted while ensuring this reference */
P_AssertLockedES(bin_lock, &G.cache.bins[e->hash.v % CACHE_BINS_COUNT].mutex);
return scope_ensure_ref_unsafe(scope, e);
}
INTERNAL struct sprite_scope_cache_ref *scope_ensure_ref_from_ref(S_Scope *scope, struct cache_ref ref)
internal struct sprite_scope_cache_ref *scope_ensure_ref_from_ref(S_Scope *scope, struct cache_ref ref)
{
/* Safe since caller has ref */
return scope_ensure_ref_unsafe(scope, ref.e);
@ -832,7 +830,7 @@ void sprite_scope_end(S_Scope *scope)
* Cache interface
* ========================== */
INTERNAL struct sprite_scope_cache_ref *cache_lookup(S_Scope *scope, struct cache_entry_hash hash, P_Lock *bin_lock)
internal struct sprite_scope_cache_ref *cache_lookup(S_Scope *scope, struct cache_entry_hash hash, P_Lock *bin_lock)
{
struct sprite_scope_cache_ref *scope_ref = 0;
@ -869,7 +867,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_lookup(S_Scope *scope, struct cach
return scope_ref;
}
INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(S_Scope *scope, S_Tag tag, enum cache_entry_kind kind, b32 force_new)
internal struct sprite_scope_cache_ref *cache_entry_from_tag(S_Scope *scope, S_Tag tag, enum cache_entry_kind kind, b32 force_new)
{
struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
u64 bin_index = hash.v % CACHE_BINS_COUNT;
@ -948,7 +946,7 @@ INTERNAL struct sprite_scope_cache_ref *cache_entry_from_tag(S_Scope *scope, S_T
return scope_ref;
}
INTERNAL void *data_from_tag_internal(S_Scope *scope, S_Tag tag, enum cache_entry_kind kind, b32 await)
internal void *data_from_tag_internal(S_Scope *scope, S_Tag tag, enum cache_entry_kind kind, b32 await)
{
/* TODO: Replace switch statements */
void *res = 0;
@ -1109,7 +1107,7 @@ S_SheetSliceArray sprite_sheet_get_slices(S_Sheet *sheet, String name, u32 frame
* Load job
* ========================== */
INTERNAL P_JobDef(sprite_load_job, job)
internal P_JobDef(sprite_load_job, job)
{
__prof;
struct load_cmd *cmd = job.sig;
@ -1141,7 +1139,7 @@ INTERNAL P_JobDef(sprite_load_job, job)
#if RESOURCE_RELOADING
INTERNAL void reload_if_exists(S_Scope *scope, S_Tag tag, enum cache_entry_kind kind)
internal void reload_if_exists(S_Scope *scope, S_Tag tag, enum cache_entry_kind kind)
{
struct cache_entry_hash hash = cache_entry_hash_from_tag_hash(tag.hash, kind);
struct cache_bin *bin = &G.cache.bins[hash.v % CACHE_BINS_COUNT];
@ -1159,7 +1157,7 @@ INTERNAL void reload_if_exists(S_Scope *scope, S_Tag tag, enum cache_entry_kind
}
}
INTERNAL WATCH_CALLBACK_FUNC_DEF(sprite_watch_callback, name)
internal WATCH_CALLBACK_FUNC_DEF(sprite_watch_callback, name)
{
S_Scope *scope = sprite_scope_begin();
@ -1190,7 +1188,7 @@ struct evict_node {
struct evict_node *next_evicted;
};
INTERNAL SORT_COMPARE_FUNC_DEF(evict_sort, arg_a, arg_b, udata)
internal SORT_COMPARE_FUNC_DEF(evict_sort, arg_a, arg_b, udata)
{
(UNUSED)udata;
struct evict_node *a = arg_a;
@ -1210,7 +1208,7 @@ INTERNAL SORT_COMPARE_FUNC_DEF(evict_sort, arg_a, arg_b, udata)
* - The cache is over its memory budget and the node's last reference is longer ago than the grace period
* - Resource reloading is enabled and the node is out of date due to a change to its original resource file
*/
INTERNAL P_JobDef(sprite_evictor_job, _)
internal P_JobDef(sprite_evictor_job, _)
{
(UNUSED)_;
b32 shutdown = 0;
@ -1263,7 +1261,7 @@ INTERNAL P_JobDef(sprite_evictor_job, _)
}
/* Scratch arena should only contain evict array at this point */
ASSERT(((ArenaBase(scratch.arena) + scratch.arena->pos) - (sizeof(*evict_array) * evict_array_count)) == (u8 *)evict_array);
Assert(((ArenaBase(scratch.arena) + scratch.arena->pos) - (sizeof(*evict_array) * evict_array_count)) == (u8 *)evict_array);
/* Sort evict nodes */
{

View File

@ -16,7 +16,7 @@ struct S_Tag {
String path;
};
INLINE S_Tag sprite_tag_nil(void) { return (S_Tag) { 0 }; }
Inline S_Tag sprite_tag_nil(void) { return (S_Tag) { 0 }; }
S_Tag sprite_tag_from_path(String path);
b32 sprite_tag_is_nil(S_Tag tag);

View File

@ -19,7 +19,7 @@
#define TAR_TYPE_PAX_HEADER_X 'x'
#define TAR_TYPE_PAX_HEADER_G 'g'
PACK(struct tar_header {
Packed(struct tar_header {
/* Pre-posix */
u8 file_name[100];
u8 file_mode[8];
@ -44,7 +44,7 @@ PACK(struct tar_header {
u8 padding[12];
});
INTERNAL u64 str_oct_to_u64(String str)
internal u64 str_oct_to_u64(String str)
{
u64 n = 0;
for (u64 i = 0; i < str.len; ++i) {
@ -75,13 +75,13 @@ struct tar_archive tar_parse(Arena *arena, String data, String prefix)
if (!string_eq(STRING_FROM_ARRAY(header.ustar_indicator), LIT("ustar\0"))) {
/* Invalid header */
ASSERT(0);
Assert(0);
continue;
}
if (header.file_name_prefix[0] != 0) {
/* Header file name prefix not supported */
ASSERT(0);
Assert(0);
continue;
}
@ -101,7 +101,7 @@ struct tar_archive tar_parse(Arena *arena, String data, String prefix)
b32 is_dir = header.file_type == TAR_TYPE_DIRECTORY;
if (!is_dir && header.file_type != TAR_TYPE_FILE) {
/* Unsupported type */
ASSERT(header.file_type == TAR_TYPE_PAX_HEADER_X ||
Assert(header.file_type == TAR_TYPE_PAX_HEADER_X ||
header.file_type == TAR_TYPE_PAX_HEADER_G);
continue;
}
@ -158,7 +158,7 @@ struct tar_archive tar_parse(Arena *arena, String data, String prefix)
return archive;
}
READONLY GLOBAL struct tar_entry g_nil_tar_entry = ZI;
Readonly Global struct tar_entry g_nil_tar_entry = ZI;
struct tar_entry *tar_get(struct tar_archive *archive, String name)
{
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);

View File

@ -3,7 +3,7 @@ extern "C"
#include "ttf.h"
}
#if PLATFORM_WINDOWS
#if PlatformIsWindows
# include "ttf_core_dwrite.cpp"
#else
# error TTF core not implemented for this platform

View File

@ -13,7 +13,9 @@ struct TTF_Result {
TTF_Glyph *glyphs;
u16 glyphs_count;
u16 *cache_indices; /* Array of indices into the `glyphs` array in order of `cache_chars` */
ImageDataRgba image_data;
u32 image_width;
u32 image_height;
u32 *image_pixels; /* Array of [width * height] pixels */
};
typedef struct TTF_StartupReceipt TTF_StartupReceipt;

View File

@ -18,16 +18,16 @@
* Global state
* ========================== */
GLOBAL struct {
Global struct {
/* FIXME: Do we need to wrap this in a mutex? */
IDWriteFactory5 *factory;
} G = ZI, DEBUG_ALIAS(G, G_ttf_dwrite);
} G = ZI, DebugAlias(G, G_ttf_dwrite);
/* ========================== *
* Decode font
* ========================== */
INTERNAL i32 round_up(f32 x)
internal i32 round_up(f32 x)
{
i32 r = (i32)x;
if ((f32)r < x) {
@ -40,12 +40,12 @@ INTERNAL i32 round_up(f32 x)
TTF_StartupReceipt ttf_startup(void)
{
__prof;
ASSERT(!G.factory);
Assert(!G.factory);
/* FIXME: I think IDWriteFactory5 only exists on later updates of windows
* 10? Need to verify. Maybe should just use a custom loader. (We're only
* using a factory5 since I think WriteInMemoryFileLoader wasn't
* implemented until then) */
#if COMPILER_CLANG
#if CompilerIsClang
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wlanguage-extension-token" /* for __uuidof */
#endif
@ -54,7 +54,7 @@ TTF_StartupReceipt ttf_startup(void)
__uuidof(IDWriteFactory5),
(IUnknown **)&G.factory
);
#if COMPILER_CLANG
#if CompilerIsClang
# pragma clang diagnostic pop
#endif
if (error != S_OK) {
@ -69,8 +69,8 @@ TTF_StartupReceipt ttf_startup(void)
TTF_Result ttf_decode(Arena *arena, String encoded, f32 point_size, u32 *cache_codes, u32 cache_codes_count)
{
__prof;
COLORREF bg_color = RGB32(0,0,0);
COLORREF fg_color = RGB32(255,255,255);
COLORREF bg_color = Rgb32(0,0,0);
COLORREF fg_color = Rgb32(255,255,255);
IDWriteFactory5 *factory = G.factory;
@ -127,8 +127,8 @@ TTF_Result ttf_decode(Arena *arena, String encoded, f32 point_size, u32 *cache_c
f32 raster_target_x = (f32)(raster_target_w / 2);
f32 raster_target_y = raster_target_x;
ASSERT((f32)((i32)raster_target_x) == raster_target_x);
ASSERT((f32)((i32)raster_target_y) == raster_target_y);
Assert((f32)((i32)raster_target_x) == raster_target_x);
Assert((f32)((i32)raster_target_y) == raster_target_y);
/* Glyph count */
u16 glyph_count = font_face->GetGlyphCount();
@ -252,7 +252,7 @@ TTF_Result ttf_decode(Arena *arena, String encoded, f32 point_size, u32 *cache_c
u64 in_x = (u64)bounding_box.left + x;
u32 *out_pixel = out_data + (out_x + (out_y * atlas_w));
u32 *in_pixel = in_data + (in_x + (in_y * in_pitch));
*out_pixel = RGBA32(0xFF, 0xFF, 0xFF, *in_pixel & 0xFF);
*out_pixel = Rgba32(0xFF, 0xFF, 0xFF, *in_pixel & 0xFF);
}
}
out_offset_x += tex_w;
@ -296,8 +296,8 @@ TTF_Result ttf_decode(Arena *arena, String encoded, f32 point_size, u32 *cache_c
result.glyphs = glyphs;
result.glyphs_count = glyph_count;
result.cache_indices = cache_indices;
result.image_data.pixels = (u32 *)atlas_memory;
result.image_data.width = (u32)atlas_w;
result.image_data.height = (u32)atlas_h;
result.image_width = (u32)atlas_w;
result.image_height = (u32)atlas_h;
result.image_pixels = (u32 *)atlas_memory;
return result;
}

View File

@ -23,7 +23,7 @@ struct console_log {
struct console_log *next;
};
GLOBAL struct {
Global struct {
Atomic32 shutdown;
P_Counter shutdown_job_counters;
P_Window *window;
@ -117,7 +117,7 @@ GLOBAL struct {
V2 world_cursor;
V2 focus_send;
} G = ZI, DEBUG_ALIAS(G, G_user);
} G = ZI, DebugAlias(G, G_user);
/* ========================== *
* Bind state
@ -125,7 +125,7 @@ GLOBAL struct {
/* TODO: Remove this */
GLOBAL READONLY enum user_bind_kind g_binds[P_Btn_Count] = {
Global Readonly enum user_bind_kind g_binds[P_Btn_Count] = {
[P_Btn_W] = USER_BIND_KIND_MOVE_UP,
[P_Btn_S] = USER_BIND_KIND_MOVE_DOWN,
[P_Btn_A] = USER_BIND_KIND_MOVE_LEFT,
@ -161,7 +161,7 @@ GLOBAL READONLY enum user_bind_kind g_binds[P_Btn_Count] = {
[P_Btn_MWheelDown] = USER_BIND_KIND_ZOOM_OUT,
[P_Btn_M3] = USER_BIND_KIND_PAN,
#if RTC
#if RtcIsEnabled
/* Debug */
[P_Btn_ForwardSlash] = USER_BIND_KIND_RESET_DEBUG_STEPS,
[P_Btn_Comma] = USER_BIND_KIND_DECR_DEBUG_STEPS,
@ -173,10 +173,10 @@ GLOBAL READONLY enum user_bind_kind g_binds[P_Btn_Count] = {
* Startup
* ========================== */
INTERNAL P_ExitFuncDef(user_shutdown);
INTERNAL P_LogEventCallbackFuncDef(debug_console_log_callback, log);
INTERNAL P_JobDef(user_update_job, _);
INTERNAL P_JobDef(local_sim_job , _);
internal P_ExitFuncDef(user_shutdown);
internal P_LogEventCallbackFuncDef(debug_console_log_callback, log);
internal P_JobDef(user_update_job, _);
internal P_JobDef(local_sim_job , _);
struct user_startup_receipt user_startup(F_StartupReceipt *font_sr,
S_StartupReceipt *sprite_sr,
@ -198,16 +198,16 @@ struct user_startup_receipt user_startup(F_StartupReceipt *font_sr,
(UNUSED)host_sr;
(UNUSED)sim_sr;
gstat_set(GSTAT_DEBUG_STEPS, U64_MAX);
gstat_set(GSTAT_DEBUG_STEPS, U64Max);
G.arena = AllocArena(GIBI(64));
G.arena = AllocArena(Gibi(64));
G.real_time_ns = P_TimeNs();
/* TODO: Remove this */
G.connect_address_str = string_copy(G.arena, connect_address_str);
/* Initialize average dt to a reasonable value */
G.average_local_to_user_snapshot_publish_dt_ns = NS_FROM_SECONDS(1) / SIM_TICKS_PER_SECOND;
G.average_local_to_user_snapshot_publish_dt_ns = NsFromSeconds(1) / SIM_TICKS_PER_SECOND;
/* User blend clients */
G.user_client_store = sim_client_store_alloc();
@ -224,7 +224,7 @@ struct user_startup_receipt user_startup(F_StartupReceipt *font_sr,
G.world_to_render_xf = XFORM_IDENT;
G.render_sig = gp_render_sig_alloc();
G.console_logs_arena = AllocArena(GIBI(64));
G.console_logs_arena = AllocArena(Gibi(64));
//P_RegisterLogCallback(debug_console_log_callback, P_LogLevel_Success);
P_RegisterLogCallback(debug_console_log_callback, P_LogLevel_Debug);
@ -240,7 +240,7 @@ struct user_startup_receipt user_startup(F_StartupReceipt *font_sr,
return (struct user_startup_receipt) { 0 };
}
INTERNAL P_ExitFuncDef(user_shutdown)
internal P_ExitFuncDef(user_shutdown)
{
__prof;
atomic32_fetch_set(&G.shutdown, 1);
@ -253,7 +253,7 @@ INTERNAL P_ExitFuncDef(user_shutdown)
* ========================== */
/* TODO: remove this (testing) */
INTERNAL void debug_draw_xform(Xform xf, u32 color_x, u32 color_y)
internal void debug_draw_xform(Xform xf, u32 color_x, u32 color_y)
{
f32 thickness = 2.f;
f32 arrowhead_len = 15.f;
@ -269,18 +269,18 @@ INTERNAL void debug_draw_xform(Xform xf, u32 color_x, u32 color_y)
draw_arrow_ray(G.render_sig, pos, x_ray, thickness, arrowhead_len, color_x);
draw_arrow_ray(G.render_sig, pos, y_ray, thickness, arrowhead_len, color_y);
//u32 color_quad = RGBA32_F(0, 1, 1, 0.3);
//u32 color_quad = Rgba32F(0, 1, 1, 0.3);
//Quad quad = quad_from_rect(RECT(0, 0, 1, -1));
//quad = xform_mul_quad(xf, quad_scale(quad, 0.075f));
//draw_quad(G.render_sig, quad, color);
}
INTERNAL void debug_draw_movement(Ent *ent)
internal void debug_draw_movement(Ent *ent)
{
f32 thickness = 2.f;
f32 arrow_len = 15.f;
u32 color_vel = COLOR_ORANGE;
u32 color_vel = ColorOrange;
Xform xf = sim_ent_get_xform(ent);
V2 velocity = ent->linear_velocity;
@ -293,7 +293,7 @@ INTERNAL void debug_draw_movement(Ent *ent)
}
}
INTERNAL String get_ent_debug_text(Arena *arena, Ent *ent)
internal String get_ent_debug_text(Arena *arena, Ent *ent)
{
TempArena scratch = BeginScratch(arena);
Snapshot *ss = ent->ss;
@ -385,7 +385,7 @@ INTERNAL String get_ent_debug_text(Arena *arena, Ent *ent)
* Debug console
* ========================== */
INTERNAL P_LogEventCallbackFuncDef(debug_console_log_callback, log)
internal P_LogEventCallbackFuncDef(debug_console_log_callback, log)
{
__prof;
P_Lock lock = P_LockE(&G.console_logs_mutex);
@ -411,30 +411,30 @@ INTERNAL P_LogEventCallbackFuncDef(debug_console_log_callback, log)
P_Unlock(&lock);
}
INTERNAL void draw_debug_console(i32 level, b32 minimized)
internal void draw_debug_console(i32 level, b32 minimized)
{
__prof;
TempArena scratch = BeginScratchNoConflict();
V2 desired_start_pos = V2FromXY(10, minimized ? 100 : 600);
i64 fade_time_ns = NS_FROM_SECONDS(10);
i64 fade_time_ns = NsFromSeconds(10);
f32 fade_curve = 0.5;
f32 spacing = 0;
f32 bg_margin = 5;
LOCAL_PERSIST u32 colors[P_LogLevel_Count][2] = ZI;
LocalPersist u32 colors[P_LogLevel_Count][2] = ZI;
MEMSET(colors, 0xFF, sizeof(colors));
#if 1
colors[P_LogLevel_Debug][0] = RGB32_F(0.4, 0.1, 0.4); colors[P_LogLevel_Debug][1] = RGB32_F(0.5, 0.2, 0.5);
colors[P_LogLevel_Info][0] = RGB32_F(0.4, 0.4, 0.4); colors[P_LogLevel_Info][1] = RGB32_F(0.5, 0.5, 0.5);
colors[P_LogLevel_Success][0] = RGB32_F(0.1, 0.3, 0.1); colors[P_LogLevel_Success][1] = RGB32_F(0.2, 0.4, 0.2);
colors[P_LogLevel_Warning][0] = RGB32_F(0.4, 0.4, 0.1); colors[P_LogLevel_Warning][1] = RGB32_F(0.5, 0.5, 0.2);
colors[P_LogLevel_Error][0] = RGB32_F(0.4, 0.1, 0.1); colors[P_LogLevel_Error][1] = RGB32_F(0.5, 0.2, 0.2);
colors[P_LogLevel_Debug][0] = Rgb32F(0.4, 0.1, 0.4); colors[P_LogLevel_Debug][1] = Rgb32F(0.5, 0.2, 0.5);
colors[P_LogLevel_Info][0] = Rgb32F(0.4, 0.4, 0.4); colors[P_LogLevel_Info][1] = Rgb32F(0.5, 0.5, 0.5);
colors[P_LogLevel_Success][0] = Rgb32F(0.1, 0.3, 0.1); colors[P_LogLevel_Success][1] = Rgb32F(0.2, 0.4, 0.2);
colors[P_LogLevel_Warning][0] = Rgb32F(0.4, 0.4, 0.1); colors[P_LogLevel_Warning][1] = Rgb32F(0.5, 0.5, 0.2);
colors[P_LogLevel_Error][0] = Rgb32F(0.4, 0.1, 0.1); colors[P_LogLevel_Error][1] = Rgb32F(0.5, 0.2, 0.2);
#else
u32 info_colors[2] = { RGB32_F(0.4, 0.4, 0.4), RGB32_F(0.5, 0.5, 0.5) };
u32 success_colors[2] = { RGB32_F(0.1, 0.3, 0.1), RGB32_F(0.2, 0.4, 0.2) };
u32 warning_colors[2] = { RGB32_F(0.4, 0.4, 0.1), RGB32_F(0.5, 0.5, 0.2) };
u32 error_colors[2] = { RGB32_F(0.4, 0.1, 0.1), RGB32_F(0.5, 0.2, 0.2) };
u32 info_colors[2] = { Rgb32F(0.4, 0.4, 0.4), Rgb32F(0.5, 0.5, 0.5) };
u32 success_colors[2] = { Rgb32F(0.1, 0.3, 0.1), Rgb32F(0.2, 0.4, 0.2) };
u32 warning_colors[2] = { Rgb32F(0.4, 0.4, 0.1), Rgb32F(0.5, 0.5, 0.2) };
u32 error_colors[2] = { Rgb32F(0.4, 0.1, 0.1), Rgb32F(0.5, 0.2, 0.2) };
#endif
V2 draw_pos = desired_start_pos;
@ -461,7 +461,7 @@ INTERNAL void draw_debug_console(i32 level, b32 minimized)
if (log->level <= level) {
/* Draw background */
u32 color = colors[log->level][log->color_index];
draw_quad(G.render_sig, quad_from_rect(log->bounds), ALPHA32_F(color, opacity));
draw_quad(G.render_sig, quad_from_rect(log->bounds), Alpha32F(color, opacity));
/* Draw text */
String text = log->msg;
@ -477,7 +477,7 @@ INTERNAL void draw_debug_console(i32 level, b32 minimized)
FMT_STR(text));
}
D_TextParams params = DRAW_TEXT_PARAMS(.font = font, .pos = draw_pos, .offset_y = DRAW_TEXT_OFFSET_Y_BOTTOM, .color = ALPHA32_F(COLOR_WHITE, opacity), .str = text);
D_TextParams params = DRAW_TEXT_PARAMS(.font = font, .pos = draw_pos, .offset_y = DRAW_TEXT_OFFSET_Y_BOTTOM, .color = Alpha32F(ColorWhite, opacity), .str = text);
Rect bounds = draw_text(G.render_sig, params);
Rect draw_bounds = bounds;
@ -508,7 +508,7 @@ INTERNAL void draw_debug_console(i32 level, b32 minimized)
* Sort entities
* ========================== */
INTERNAL SORT_COMPARE_FUNC_DEF(ent_draw_order_cmp, arg_a, arg_b, udata)
internal SORT_COMPARE_FUNC_DEF(ent_draw_order_cmp, arg_a, arg_b, udata)
{
(UNUSED)udata;
Ent *a = *(Ent **)arg_a;
@ -548,7 +548,7 @@ INTERNAL SORT_COMPARE_FUNC_DEF(ent_draw_order_cmp, arg_a, arg_b, udata)
* Update
* ========================== */
INTERNAL void user_update(P_Window *window)
internal void user_update(P_Window *window)
{
__prof;
@ -615,7 +615,7 @@ INTERNAL void user_update(P_Window *window)
G.render_time_ns += G.real_dt_ns * sim_publish_timescale;
}
i64 render_time_target_diff_ns = G.render_time_target_ns - G.render_time_ns;
if (render_time_target_diff_ns > NS_FROM_SECONDS(0.010) || render_time_target_diff_ns < NS_FROM_SECONDS(-0.005)) {
if (render_time_target_diff_ns > NsFromSeconds(0.010) || render_time_target_diff_ns < NsFromSeconds(-0.005)) {
/* Snap render time if it gets too out of sync with target render time */
G.render_time_ns = G.render_time_target_ns;
}
@ -667,7 +667,7 @@ INTERNAL void user_update(P_Window *window)
/* Release unneeded blended snapshots */
if (G.ss_blended->tick > 0) {
sim_snapshot_release_ticks_in_range(G.user_blended_client, 0, G.ss_blended->tick - 1);
sim_snapshot_release_ticks_in_range(G.user_blended_client, G.ss_blended->tick + 1, U64_MAX);
sim_snapshot_release_ticks_in_range(G.user_blended_client, G.ss_blended->tick + 1, U64Max);
}
}
@ -841,7 +841,7 @@ INTERNAL void user_update(P_Window *window)
if (!sim_ent_is_valid_and_active(ent)) continue;
/* How much time between camera shakes */
i64 frequency_ns = NS_FROM_SECONDS(0.01f);
i64 frequency_ns = NsFromSeconds(0.01f);
f32 shake = ent->shake;
if (shake > 0) {
u64 angle_seed0 = ent->id.uid.lo + (u64)(G.ss_blended->sim_time_ns / frequency_ns);
@ -1029,9 +1029,9 @@ INTERNAL void user_update(P_Window *window)
V2 pos = xform_invert_mul_v2(G.world_to_render_xf, V2FromXY(0, 0));
V2 size = xform_basis_invert_mul_v2(G.world_to_render_xf, G.render_size);
u32 color0 = RGBA32_F(0.17f, 0.17f, 0.17f, 1.f);
u32 color1 = RGBA32_F(0.15f, 0.15f, 0.15f, 1.f);
draw_grid(G.render_sig, xform_from_rect(RECT_FROM_V2(pos, size)), color0, color1, RGBA32(0x3f, 0x3f, 0x3f, 0xFF), COLOR_RED, COLOR_GREEN, thickness, spacing, offset);
u32 color0 = Rgba32F(0.17f, 0.17f, 0.17f, 1.f);
u32 color1 = Rgba32F(0.15f, 0.15f, 0.15f, 1.f);
draw_grid(G.render_sig, xform_from_rect(RECT_FROM_V2(pos, size)), color0, color1, Rgba32(0x3f, 0x3f, 0x3f, 0xFF), ColorRed, ColorGreen, thickness, spacing, offset);
}
#if 0
@ -1211,8 +1211,8 @@ INTERNAL void user_update(P_Window *window)
}
f32 thickness = 0.01f;
u32 color_start = RGBA32_F(1, 0.5, 0, opacity_a);
u32 color_end = RGBA32_F(1, 0.8, 0.4, opacity_b);
u32 color_start = Rgba32F(1, 0.5, 0, opacity_a);
u32 color_end = Rgba32F(1, 0.8, 0.4, opacity_b);
if (opacity_b > 0.99f) {
draw_circle(G.render_sig, b, thickness / 2, color_end, 20);
@ -1272,8 +1272,8 @@ INTERNAL void user_update(P_Window *window)
/* Draw xform */
if (!skip_debug_draw_transform) {
u32 color_x = RGBA32_F(1, 0, 0, 0.5);
u32 color_y = RGBA32_F(0, 1, 0, 0.5);
u32 color_x = Rgba32F(1, 0, 0, 0.5);
u32 color_y = Rgba32F(0, 1, 0, 0.5);
debug_draw_xform(xf, color_x, color_y);
}
@ -1281,7 +1281,7 @@ INTERNAL void user_update(P_Window *window)
if (ent->local_collider.count > 0) {
Aabb aabb = collider_aabb_from_collider(&ent->local_collider, xf);
f32 thickness = 1;
u32 color = RGBA32_F(1, 0, 1, 0.5);
u32 color = Rgba32F(1, 0, 1, 0.5);
Quad quad = quad_from_aabb(aabb);
quad = xform_mul_quad(G.world_to_ui_xf, quad);
draw_quad_line(G.render_sig, quad, thickness, color);
@ -1295,7 +1295,7 @@ INTERNAL void user_update(P_Window *window)
start = xform_mul_v2(G.world_to_ui_xf, start);
V2 end = v2_add(xf.og, ent->control.focus);
end = xform_mul_v2(G.world_to_ui_xf, end);
draw_arrow_line(G.render_sig, start, end, 3, 10, RGBA32_F(1, 1, 1, 0.5));
draw_arrow_line(G.render_sig, start, end, 3, 10, Rgba32F(1, 1, 1, 0.5));
}
#if 0
@ -1303,9 +1303,9 @@ INTERNAL void user_update(P_Window *window)
if (!sprite_tag_is_nil(ent->sprite)) {
S_Sheet *sheet = sprite_sheet_from_tag_async(sprite_frame_scope, sprite);
u32 quad_color = RGBA32_F(1, 0, 0.5, 1);
u32 point_color = RGBA32_F(1, 0, 0, 1);
u32 ray_color = RGBA32_F(1, 0, 0.5, 1);
u32 quad_color = Rgba32F(1, 0, 0.5, 1);
u32 point_color = Rgba32F(1, 0, 0, 1);
u32 ray_color = Rgba32F(1, 0, 0.5, 1);
for (u64 i = 0; i < sheet->slice_groups_count; ++i) {
S_SheetSliceGroup *group = &sheet->slice_groups[i];
@ -1343,7 +1343,7 @@ INTERNAL void user_update(P_Window *window)
Ent *e1 = sim_ent_from_id(G.ss_blended, ent->weld_joint_data.e1);
Xform e1_xf = sim_ent_get_xform(e1);
u32 color = COLOR_YELLOW;
u32 color = ColorYellow;
f32 radius = 3;
V2 point = xform_mul_v2(e1_xf, ent->weld_joint_data.point_local_e1);
point = xform_mul_v2(G.world_to_ui_xf, point);
@ -1357,7 +1357,7 @@ INTERNAL void user_update(P_Window *window)
if (sim_ent_has_prop(ent, SEPROP_MOUSE_JOINT)) {
Ent *target = sim_ent_from_id(G.ss_blended, ent->mouse_joint_data.target);
Xform target_xf = sim_ent_get_xform(target);
u32 color = COLOR_WHITE;
u32 color = ColorWhite;
V2 point_start = xform_mul_v2(target_xf, ent->mouse_joint_data.point_local_start);
V2 point_end = G.world_cursor;
point_start = xform_mul_v2(G.world_to_ui_xf, point_start);
@ -1369,7 +1369,7 @@ INTERNAL void user_update(P_Window *window)
/* Draw collider */
if (ent->local_collider.count > 0) {
CLD_Shape collider = ent->local_collider;
u32 color = RGBA32_F(1, 1, 0, 0.5);
u32 color = Rgba32F(1, 1, 0, 0.5);
f32 thickness = 2;
{
/* Draw collider using support points */
@ -1381,7 +1381,7 @@ INTERNAL void user_update(P_Window *window)
/* Draw collider shape points */
for (u32 i = 0; i < collider.count; ++i) {
V2 p = xform_mul_v2(xform_mul(G.world_to_ui_xf, xf), collider.points[i]);
draw_circle(G.render_sig, p, 3, COLOR_BLUE, 10);
draw_circle(G.render_sig, p, 3, ColorBlue, 10);
}
}
if (collider.count == 1 && collider.radius > 0) {
@ -1397,7 +1397,7 @@ INTERNAL void user_update(P_Window *window)
{
V2 p = collider_support_point(&collider, xf, ent->control.focus);
p = xform_mul_v2(G.world_to_ui_xf, p);
draw_circle(G.render_sig, p, 3, COLOR_RED, 10);
draw_circle(G.render_sig, p, 3, ColorRed, 10);
}
#endif
}
@ -1410,12 +1410,12 @@ INTERNAL void user_update(P_Window *window)
(UNUSED)e0;
(UNUSED)e1;
#if DEVELOPER
#if DeveloperIsEnabled
/* Draw contact points */
{
f32 radius = 5;
for (u32 i = 0; i < data->num_points; ++i) {
u32 color = (data->skip_solve || data->wrong_dir) ? ALPHA32_F(COLOR_YELLOW, 0.3) : RGBA32_F(0.8, 0.2, 0.2, 1);
u32 color = (data->skip_solve || data->wrong_dir) ? Alpha32F(ColorYellow, 0.3) : Rgba32F(0.8, 0.2, 0.2, 1);
ContactPoint point = data->points[i];
V2 dbg_pt = point.dbg_pt;
@ -1486,7 +1486,7 @@ INTERNAL void user_update(P_Window *window)
#if 0
{
f32 radius = 4;
u32 color = RGBA32_F(1, 1, 0, 0.5);
u32 color = Rgba32F(1, 1, 0, 0.5);
V2 a = xform_mul_v2(G.world_to_ui_xf, data->closest0);
V2 b = xform_mul_v2(G.world_to_ui_xf, data->closest1);
draw_circle(G.render_sig, a, radius, color, 10);
@ -1498,12 +1498,12 @@ INTERNAL void user_update(P_Window *window)
{
f32 thickness = 4;
f32 radius = 4;
u32 color_line = RGBA32_F(1, 0, 1, 0.75);
u32 color_a = RGBA32_F(1, 0, 0, 0.25);
u32 color_b = RGBA32_F(0, 1, 0, 0.25);
u32 color_line_clipped = RGBA32_F(1, 0, 1, 1);
u32 color_a_clipped = RGBA32_F(1, 0, 0, 1);
u32 color_b_clipped = RGBA32_F(0, 1, 0, 1);
u32 color_line = Rgba32F(1, 0, 1, 0.75);
u32 color_a = Rgba32F(1, 0, 0, 0.25);
u32 color_b = Rgba32F(0, 1, 0, 0.25);
u32 color_line_clipped = Rgba32F(1, 0, 1, 1);
u32 color_a_clipped = Rgba32F(1, 0, 0, 1);
u32 color_b_clipped = Rgba32F(0, 1, 0, 1);
{
V2 a = xform_mul_v2(G.world_to_ui_xf, collider_res.a0);
V2 b = xform_mul_v2(G.world_to_ui_xf, collider_res.b0);
@ -1577,7 +1577,7 @@ INTERNAL void user_update(P_Window *window)
/* Draw menkowski */
{
u32 color = collider_res.solved ? RGBA32_F(0, 0, 0.25, 1) : RGBA32_F(0, 0.25, 0.25, 1);
u32 color = collider_res.solved ? Rgba32F(0, 0, 0.25, 1) : Rgba32F(0, 0.25, 0.25, 1);
f32 thickness = 2;
u32 detail = 512;
(UNUSED)thickness;
@ -1591,7 +1591,7 @@ INTERNAL void user_update(P_Window *window)
/* Draw cloud */
{
u32 color = RGBA32_F(1, 1, 1, 1);
u32 color = Rgba32F(1, 1, 1, 1);
f32 radius = 2;
V2Array m = cloud(temp.arena, &e0_collider, &e1_collider, e0_xf, e1_xf);
@ -1605,7 +1605,7 @@ INTERNAL void user_update(P_Window *window)
/* Draw prototype */
{
f32 thickness = 2;
u32 color = RGBA32_F(1, 1, 1, 0.25);
u32 color = Rgba32F(1, 1, 1, 0.25);
V2Array m = {
.points = collider_res.prototype.points,
@ -1619,10 +1619,10 @@ INTERNAL void user_update(P_Window *window)
/* Draw simplex */
{
f32 thickness = 2;
u32 line_color = COLOR_YELLOW;
u32 color_first = RGBA32_F(1, 0, 0, 0.75);
u32 color_second = RGBA32_F(0, 1, 0, 0.75);
u32 color_third = RGBA32_F(0, 0, 1, 0.75);
u32 line_color = ColorYellow;
u32 color_first = Rgba32F(1, 0, 0, 0.75);
u32 color_second = Rgba32F(0, 1, 0, 0.75);
u32 color_third = Rgba32F(0, 0, 1, 0.75);
struct collider_menkowski_simplex simplex = collider_res.simplex;
V2 simplex_points[] = { simplex.a.p, simplex.b.p, simplex.c.p };
@ -1648,7 +1648,7 @@ INTERNAL void user_update(P_Window *window)
/* Draw normal */
{
u32 color = COLOR_WHITE;
u32 color = ColorWhite;
f32 len = 0.1f;
f32 arrow_thickness = 4;
f32 arrowhead_height = 10;
@ -1663,7 +1663,7 @@ INTERNAL void user_update(P_Window *window)
/* Draw hierarchy */
if (sim_ent_has_prop(parent, SEPROP_ACTIVE) && !parent->is_root) {
u32 color = RGBA32_F(0.6, 0.6, 1, 0.75);
u32 color = Rgba32F(0.6, 0.6, 1, 0.75);
f32 thickness = 2;
f32 arrow_height = 15;
@ -1674,7 +1674,7 @@ INTERNAL void user_update(P_Window *window)
/* Draw camera rect */
if (sim_ent_has_prop(ent, SEPROP_CAMERA)) {
u32 color = ent == local_camera ? RGBA32_F(1, 1, 1, 0.5) : RGBA32_F(0, 0.75, 0, 0.5);
u32 color = ent == local_camera ? Rgba32F(1, 1, 1, 0.5) : Rgba32F(0, 0.75, 0, 0.5);
f32 thickness = 3;
Xform quad_xf = xform_mul(xf, ent->camera_quad_xform);
@ -1864,7 +1864,7 @@ INTERNAL void user_update(P_Window *window)
}
}
#if RTC
#if RtcIsEnabled
/* Gjk steps */
{
if (G.bind_states[USER_BIND_KIND_RESET_DEBUG_STEPS].num_presses_and_repeats > 0) {
@ -1885,7 +1885,7 @@ INTERNAL void user_update(P_Window *window)
i64 stat_now_ns = P_TimeNs();
G.net_bytes_read.last_second_end = gstat_get(GSTAT_SOCK_BYTES_RECEIVED);
G.net_bytes_sent.last_second_end = gstat_get(GSTAT_SOCK_BYTES_SENT);
if (stat_now_ns - G.last_second_reset_ns > NS_FROM_SECONDS(1)) {
if (stat_now_ns - G.last_second_reset_ns > NsFromSeconds(1)) {
G.last_second_reset_ns = stat_now_ns;
G.net_bytes_read.last_second = G.net_bytes_read.last_second_end - G.net_bytes_read.last_second_start;
G.net_bytes_sent.last_second = G.net_bytes_sent.last_second_end - G.net_bytes_sent.last_second_start;
@ -1944,26 +1944,26 @@ INTERNAL void user_update(P_Window *window)
text.len += string_format(temp.arena, LIT("blended world tick: %F"), FMT_UINT(G.ss_blended->tick)).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("blended world time: %F"), FMT_FLOAT(SECONDS_FROM_NS(G.ss_blended->sim_time_ns))).len;
text.len += string_format(temp.arena, LIT("blended world time: %F"), FMT_FLOAT(SecondsFromNs(G.ss_blended->sim_time_ns))).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("average local sim publish dt: %F"), FMT_FLOAT(SECONDS_FROM_NS(G.average_local_to_user_snapshot_publish_dt_ns))).len;
text.len += string_format(temp.arena, LIT("average local sim publish dt: %F"), FMT_FLOAT(SecondsFromNs(G.average_local_to_user_snapshot_publish_dt_ns))).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("local sim last known tick: %F"), FMT_UINT(G.local_sim_last_known_tick)).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("local sim last known time: %F"), FMT_FLOAT(SECONDS_FROM_NS(G.local_sim_last_known_time_ns))).len;
text.len += string_format(temp.arena, LIT("local sim last known time: %F"), FMT_FLOAT(SecondsFromNs(G.local_sim_last_known_time_ns))).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("local sim predicted time: %F"), FMT_FLOAT(SECONDS_FROM_NS(G.local_sim_predicted_time_ns))).len;
text.len += string_format(temp.arena, LIT("local sim predicted time: %F"), FMT_FLOAT(SecondsFromNs(G.local_sim_predicted_time_ns))).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("render time target: %F"), FMT_FLOAT(SECONDS_FROM_NS(G.render_time_target_ns))).len;
text.len += string_format(temp.arena, LIT("render time target: %F"), FMT_FLOAT(SecondsFromNs(G.render_time_target_ns))).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("render time: %F"), FMT_FLOAT(SECONDS_FROM_NS(G.render_time_ns))).len;
text.len += string_format(temp.arena, LIT("render time: %F"), FMT_FLOAT(SecondsFromNs(G.render_time_ns))).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
@ -1994,7 +1994,7 @@ INTERNAL void user_update(P_Window *window)
text.len += string_format(temp.arena, LIT("Network write: %F mbit/s"), FMT_FLOAT((f64)G.net_bytes_sent.last_second * 8 / 1000 / 1000)).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("Ping (real): %F ms"), FMT_FLOAT(SECONDS_FROM_NS(local_player->player_last_rtt_ns) * 1000)).len;
text.len += string_format(temp.arena, LIT("Ping (real): %F ms"), FMT_FLOAT(SecondsFromNs(local_player->player_last_rtt_ns) * 1000)).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("Ping (average): %F ms"), FMT_FLOAT(local_player->player_average_rtt_seconds * 1000)).len;
@ -2017,7 +2017,7 @@ INTERNAL void user_update(P_Window *window)
//text.len += string_copy(temp.arena, LIT("\n")).len;
//text.len += string_copy(temp.arena, LIT("\n")).len;
#if RTC
#if RtcIsEnabled
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_copy(temp.arena, LIT("\n")).len;
text.len += string_format(temp.arena, LIT("Debug steps: %F"), FMT_UINT(gstat_get(GSTAT_DEBUG_STEPS))).len;
@ -2029,13 +2029,13 @@ INTERNAL void user_update(P_Window *window)
V2 pos = V2FromXY(10, G.ui_size.y);
D_TextOffsetY offset_y = DRAW_TEXT_OFFSET_Y_BOTTOM;
draw_text(G.render_sig, DRAW_TEXT_PARAMS(.font = font, .pos = pos, .str = text, .offset_y = offset_y, .color = COLOR_WHITE));
draw_text(G.render_sig, DRAW_TEXT_PARAMS(.font = font, .pos = pos, .str = text, .offset_y = offset_y, .color = ColorWhite));
EndTempArena(temp);
}
}
{
#if DEVELOPER
#if DeveloperIsEnabled
b32 console_minimized = !G.debug_console;
i32 console_level = console_minimized ? P_LogLevel_Success: P_LogLevel_Debug;
draw_debug_console(console_level, console_minimized);
@ -2081,7 +2081,7 @@ INTERNAL void user_update(P_Window *window)
EndScratch(scratch);
}
INTERNAL P_JobDef(user_update_job, _)
internal P_JobDef(user_update_job, _)
{
(UNUSED)_;
i64 time_ns = P_TimeNs();
@ -2117,7 +2117,7 @@ INTERNAL P_JobDef(user_update_job, _)
INTERNAL void generate_user_input_cmds(Client *user_input_client, u64 tick)
internal void generate_user_input_cmds(Client *user_input_client, u64 tick)
{
Snapshot *prev_user_input_ss = sim_snapshot_from_tick(user_input_client, user_input_client->last_tick);
Snapshot *user_input_ss = sim_snapshot_alloc(user_input_client, prev_user_input_ss, tick);
@ -2179,7 +2179,7 @@ struct sim_decode_queue {
};
INTERNAL P_JobDef(local_sim_job, _)
internal P_JobDef(local_sim_job, _)
{
(UNUSED)_;
@ -2203,8 +2203,8 @@ INTERNAL P_JobDef(local_sim_job, _)
is_master = 1;
}
BB_Buff msg_writer_bb = AllocBitbuff(GIBI(64));
BB_Buff snapshot_writer_bb = AllocBitbuff(GIBI(64));
BB_Buff msg_writer_bb = AllocBitbuff(Gibi(64));
BB_Buff snapshot_writer_bb = AllocBitbuff(Gibi(64));
SimAccel accel = sim_accel_alloc();
ClientStore *store = sim_client_store_alloc();
@ -2254,7 +2254,7 @@ INTERNAL P_JobDef(local_sim_job, _)
i64 last_publish_to_user_ns = 0;
i64 real_time_ns = 0;
i64 real_dt_ns = 0;
i64 step_dt_ns = NS_FROM_SECONDS(1) / SIM_TICKS_PER_SECOND;
i64 step_dt_ns = NsFromSeconds(1) / SIM_TICKS_PER_SECOND;
f64 compute_timescale = 1.0;
while (!atomic32_fetch(&G.shutdown)) {
TempArena scratch = BeginScratchNoConflict();
@ -2293,7 +2293,7 @@ INTERNAL P_JobDef(local_sim_job, _)
master_blended_client = sim_client_alloc(store);
} else {
/* We already have a master client */
ASSERT(0);
Assert(0);
}
}
}
@ -2366,7 +2366,7 @@ INTERNAL P_JobDef(local_sim_job, _)
if (tick > client->highest_received_tick) {
client->highest_received_tick = tick;
if (average_master_receive_dt_ns == 0) {
average_master_receive_dt_ns = NS_FROM_SECONDS(1) / SIM_TICKS_PER_SECOND;
average_master_receive_dt_ns = NsFromSeconds(1) / SIM_TICKS_PER_SECOND;
} else {
average_master_receive_dt_ns -= average_master_receive_dt_ns / 50;
average_master_receive_dt_ns += (real_time_ns - last_tick_from_master_received_at_ns) / 50;
@ -2377,7 +2377,7 @@ INTERNAL P_JobDef(local_sim_job, _)
}
} else {
/* We do not have the tick that the incoming delta is based from */
ASSERT(0);
Assert(0);
}
tmp_encoded_len = BB_ReadUV(&msg_br);
@ -2415,7 +2415,7 @@ INTERNAL P_JobDef(local_sim_job, _)
} else {
/* We do not have the tick that the incoming delta is based from.
* This decode should never have been queued in the first place. */
ASSERT(0);
Assert(0);
}
}
@ -2531,7 +2531,7 @@ INTERNAL P_JobDef(local_sim_job, _)
}
i64 blend_time_target_diff_ns = master_blend_time_target_ns - master_blend_time_ns;
if (blend_time_target_diff_ns > NS_FROM_SECONDS(0.100) || blend_time_target_diff_ns < NS_FROM_SECONDS(-0.100)) {
if (blend_time_target_diff_ns > NsFromSeconds(0.100) || blend_time_target_diff_ns < NsFromSeconds(-0.100)) {
/* Snap blend time if it gets too far from target blend time */
master_blend_time_ns = master_blend_time_target_ns;
}
@ -2574,7 +2574,7 @@ INTERNAL P_JobDef(local_sim_job, _)
/* Release unneeded blended master snapshots */
if (master_ss->tick > 0) {
sim_snapshot_release_ticks_in_range(master_blended_client, 0, master_ss->tick - 1);
sim_snapshot_release_ticks_in_range(master_blended_client, master_ss->tick + 1, U64_MAX);
sim_snapshot_release_ticks_in_range(master_blended_client, master_ss->tick + 1, U64Max);
}
}
} else {
@ -2675,7 +2675,7 @@ INTERNAL P_JobDef(local_sim_job, _)
}
/* Release any existing ticks that are about to be simulated */
sim_snapshot_release_ticks_in_range(local_client, step_base_tick + 1, U64_MAX);
sim_snapshot_release_ticks_in_range(local_client, step_base_tick + 1, U64Max);
/* Step */
generate_user_input_cmds(user_input_client, step_end_tick);

View File

@ -36,7 +36,7 @@ enum user_bind_kind {
USER_BIND_KIND_ZOOM_OUT,
USER_BIND_KIND_PAN,
#if RTC
#if RtcIsEnabled
/* Debug */
USER_BIND_KIND_RESET_DEBUG_STEPS,

View File

@ -5,7 +5,7 @@ struct watch_event {
struct watch_event *next;
};
GLOBAL struct {
Global struct {
P_Watch *watch;
Atomic32 watch_shutdown;
P_Counter watch_jobs_counter;
@ -19,21 +19,21 @@ GLOBAL struct {
P_Mutex watch_callbacks_mutex;
watch_callback *watch_callbacks[64];
u64 num_watch_callbacks;
} G = ZI, DEBUG_ALIAS(G, G_watch);
} G = ZI, DebugAlias(G, G_watch);
/* ========================== *
* Startup
* ========================== */
INTERNAL P_JobDef(watch_monitor_job, _);
INTERNAL P_JobDef(watch_dispatcher_job, _);
INTERNAL P_ExitFuncDef(watch_shutdown);
internal P_JobDef(watch_monitor_job, _);
internal P_JobDef(watch_dispatcher_job, _);
internal P_ExitFuncDef(watch_shutdown);
void watch_startup(void)
{
G.watch = P_AllocWatch(LIT("./"));
G.watch_events_arena = AllocArena(GIBI(64));
G.watch_events_arena = AllocArena(Gibi(64));
P_Run(1, watch_monitor_job, 0, P_Pool_Floating, P_Priority_Low, &G.watch_jobs_counter);
P_Run(1, watch_dispatcher_job, 0, P_Pool_Background, P_Priority_Low, &G.watch_jobs_counter);
@ -44,14 +44,14 @@ void watch_startup(void)
* Watch
* ========================== */
INTERNAL P_ExitFuncDef(watch_shutdown)
internal P_ExitFuncDef(watch_shutdown)
{
__prof;
atomic32_fetch_set(&G.watch_shutdown, 1);
{
P_Lock lock = P_LockE(&G.watch_dispatcher_mutex);
P_SignalCv(&G.watch_dispatcher_cv, I32_MAX);
P_SignalCv(&G.watch_dispatcher_cv, I32Max);
P_WakeWatch(G.watch);
P_Unlock(&lock);
}
@ -71,7 +71,7 @@ void watch_register_callback(watch_callback *callback)
P_Unlock(&lock);
}
INTERNAL P_JobDef(watch_monitor_job, _)
internal P_JobDef(watch_monitor_job, _)
{
(UNUSED)_;
TempArena scratch = BeginScratchNoConflict();
@ -108,7 +108,7 @@ INTERNAL P_JobDef(watch_monitor_job, _)
}
}
}
P_SignalCv(&G.watch_dispatcher_cv, I32_MAX);
P_SignalCv(&G.watch_dispatcher_cv, I32Max);
P_Unlock(&lock);
}
EndTempArena(temp);
@ -129,7 +129,7 @@ struct watch_callback_job_sig {
watch_callback **callbacks;
};
INTERNAL P_JobDef(watch_callback_job, job)
internal P_JobDef(watch_callback_job, job)
{
__prof;
struct watch_callback_job_sig *sig = job.sig;
@ -138,7 +138,7 @@ INTERNAL P_JobDef(watch_callback_job, job)
callback(name);
}
INTERNAL P_JobDef(watch_dispatcher_job, _)
internal P_JobDef(watch_dispatcher_job, _)
{
(UNUSED)_;
@ -152,7 +152,7 @@ INTERNAL P_JobDef(watch_dispatcher_job, _)
/* Delay so that duplicate events pile up */
{
__profn("Delay");
P_Wait(0, 0, 0, NS_FROM_SECONDS(WATCH_DISPATCHER_DELAY_SECONDS));
P_Wait(0, 0, 0, NsFromSeconds(WATCH_DISPATCHER_DELAY_SECONDS));
}
/* Pull watch events from queue */