remove thread local storage

This commit is contained in:
jacob 2025-07-03 12:20:17 -05:00
parent fe40690871
commit 6841ff9784
27 changed files with 389 additions and 358 deletions

View File

@ -198,7 +198,7 @@ INTERNAL struct app_arg_list parse_args(struct arena *arena, struct string args_
* Entry point * Entry point
* ========================== */ * ========================== */
void app_entry_point(struct string args_str) void sys_app_entry(struct string args_str)
{ {
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();

View File

@ -17,8 +17,6 @@ struct string app_write_path_cat(struct arena *arena, struct string filename);
/* Register a function that will be called when the application exits */ /* Register a function that will be called when the application exits */
void app_register_exit_callback(app_exit_callback_func *func); void app_register_exit_callback(app_exit_callback_func *func);
void app_entry_point(struct string args);
void app_exit(void); void app_exit(void);
#endif #endif

View File

@ -195,21 +195,21 @@ INTERNAL struct huffman huffman_init(struct arena *arena, u32 max_code_bits, u32
u32 code_length_hist[HUFFMAN_BIT_COUNT] = ZI; u32 code_length_hist[HUFFMAN_BIT_COUNT] = ZI;
for (u32 i = 0; i < bl_counts_count; ++i) { for (u32 i = 0; i < bl_counts_count; ++i) {
u32 count = bl_counts[i]; u32 count = bl_counts[i];
ASSERT(count <= ARRAY_COUNT(code_length_hist)); ASSERT(count <= countof(code_length_hist));
++code_length_hist[count]; ++code_length_hist[count];
} }
u32 next_code[HUFFMAN_BIT_COUNT] = ZI; u32 next_code[HUFFMAN_BIT_COUNT] = ZI;
next_code[0] = 0; next_code[0] = 0;
code_length_hist[0] = 0; code_length_hist[0] = 0;
for (u32 i = 1; i < ARRAY_COUNT(next_code); ++i) { for (u32 i = 1; i < countof(next_code); ++i) {
next_code[i] = ((next_code[i - 1] + code_length_hist[i - 1]) << 1); next_code[i] = ((next_code[i - 1] + code_length_hist[i - 1]) << 1);
} }
for (u32 i = 0; i < bl_counts_count; ++i) { for (u32 i = 0; i < bl_counts_count; ++i) {
u32 code_bits = bl_counts[i]; u32 code_bits = bl_counts[i];
if (code_bits) { if (code_bits) {
ASSERT(code_bits < ARRAY_COUNT(next_code)); ASSERT(code_bits < countof(next_code));
u32 code = next_code[code_bits]++; u32 code = next_code[code_bits]++;
u32 arbitrary_bits = res.max_code_bits - code_bits; u32 arbitrary_bits = res.max_code_bits - code_bits;
u32 entry_count = (1 << arbitrary_bits); u32 entry_count = (1 << arbitrary_bits);
@ -294,12 +294,12 @@ INTERNAL void inflate(u8 *dst, u8 *encoded)
u32 code = g_hclen_order[i]; u32 code = g_hclen_order[i];
hclen_bl_counts[code] = consume_bits(&bb, 3); hclen_bl_counts[code] = consume_bits(&bb, 3);
} }
struct huffman dict_huffman = huffman_init(temp.arena, 7, hclen_bl_counts, ARRAY_COUNT(hclen_bl_counts)); struct huffman dict_huffman = huffman_init(temp.arena, 7, hclen_bl_counts, countof(hclen_bl_counts));
/* Decode dict huffman */ /* Decode dict huffman */
u32 lit_len_count = 0; u32 lit_len_count = 0;
u32 len_count = hlit + hdist; u32 len_count = hlit + hdist;
ASSERT(len_count <= ARRAY_COUNT(lit_len_dist_table)); ASSERT(len_count <= countof(lit_len_dist_table));
while (lit_len_count < len_count) { while (lit_len_count < len_count) {
u32 rep_count = 1; u32 rep_count = 1;
u32 rep_val = 0; u32 rep_val = 0;
@ -329,7 +329,7 @@ INTERNAL void inflate(u8 *dst, u8 *encoded)
hlit = 288; hlit = 288;
hdist = 32; hdist = 32;
u32 index = 0; u32 index = 0;
for (u32 i = 0; i < ARRAY_COUNT(g_fixed_bl_counts); ++i) { for (u32 i = 0; i < countof(g_fixed_bl_counts); ++i) {
u32 bit_count = g_fixed_bl_counts[i][1]; u32 bit_count = g_fixed_bl_counts[i][1];
u32 last_valuie = g_fixed_bl_counts[i][0]; u32 last_valuie = g_fixed_bl_counts[i][0];
while (index <= last_valuie) { while (index <= last_valuie) {

View File

@ -60,7 +60,7 @@ INTERNAL void refresh_dbg_table(void)
struct sys_lock lock = sys_mutex_lock_e(G.dbg_table_mutex); struct sys_lock lock = sys_mutex_lock_e(G.dbg_table_mutex);
MEMZERO_ARRAY(G.dbg_table); MEMZERO_ARRAY(G.dbg_table);
G.dbg_table_count = 0; G.dbg_table_count = 0;
for (u64 i = 0; i < ARRAY_COUNT(G.lookup); ++i) { for (u64 i = 0; i < countof(G.lookup); ++i) {
struct asset *asset = &G.lookup[i]; struct asset *asset = &G.lookup[i];
if (asset->hash != 0) { if (asset->hash != 0) {
G.dbg_table[G.dbg_table_count++] = asset; G.dbg_table[G.dbg_table_count++] = asset;
@ -77,7 +77,7 @@ INTERNAL struct asset *asset_cache_get_slot_locked(struct sys_lock *lock, struct
sys_assert_locked_e_or_s(lock, G.lookup_mutex); sys_assert_locked_e_or_s(lock, G.lookup_mutex);
(UNUSED)lock; (UNUSED)lock;
u64 index = hash % ARRAY_COUNT(G.lookup); u64 index = hash % countof(G.lookup);
while (true) { while (true) {
struct asset *slot = &G.lookup[index]; struct asset *slot = &G.lookup[index];
if (slot->hash) { if (slot->hash) {
@ -87,7 +87,7 @@ INTERNAL struct asset *asset_cache_get_slot_locked(struct sys_lock *lock, struct
return slot; return slot;
} else { } else {
++index; ++index;
if (index >= ARRAY_COUNT(G.lookup)) { if (index >= countof(G.lookup)) {
index = 0; index = 0;
} }
} }

View File

@ -755,7 +755,7 @@ void bitbuff_test(void)
{ {
struct bitbuff bb = bitbuff_alloc(GIGABYTE(64)); struct bitbuff bb = bitbuff_alloc(GIGABYTE(64));
struct bitbuff_writer bw = bw_from_bitbuff(&bb); struct bitbuff_writer bw = bw_from_bitbuff(&bb);
for (u64 i = 0; i < ARRAY_COUNT(cases); ++i) { for (u64 i = 0; i < countof(cases); ++i) {
struct test_case c = cases[i]; struct test_case c = cases[i];
if (c.kind == kind_ubits) { if (c.kind == kind_ubits) {
bw_write_ubits(&bw, c.ubits.v, c.ubits.num_bits); bw_write_ubits(&bw, c.ubits.v, c.ubits.num_bits);
@ -777,7 +777,7 @@ void bitbuff_test(void)
{ {
struct bitbuff bb = bitbuff_from_string(encoded); struct bitbuff bb = bitbuff_from_string(encoded);
struct bitbuff_reader br = br_from_bitbuff(&bb); struct bitbuff_reader br = br_from_bitbuff(&bb);
for (u64 i = 0; i < ARRAY_COUNT(cases); ++i) { for (u64 i = 0; i < countof(cases); ++i) {
struct test_case c = cases[i]; struct test_case c = cases[i];
if (c.kind == kind_ubits) { if (c.kind == kind_ubits) {
u64 w = c.ubits.v; u64 w = c.ubits.v;

View File

@ -458,7 +458,7 @@ INTERNAL struct epa_result epa_get_normal_from_gjk(struct collider_shape *shape0
#if COLLIDER_DEBUG #if COLLIDER_DEBUG
res.dbg_step = dbg_step; res.dbg_step = dbg_step;
u32 len = min_u32(proto_count, ARRAY_COUNT(res.prototype.points)); u32 len = min_u32(proto_count, countof(res.prototype.points));
for (u32 i = 0; i < len; ++i) { for (u32 i = 0; i < len; ++i) {
res.prototype.points[i] = proto[i].p; res.prototype.points[i] = proto[i].p;
} }
@ -601,7 +601,7 @@ struct collider_collision_points_result collider_collision_points(struct collide
/* Clip to determine final points */ /* Clip to determine final points */
if (colliding) { if (colliding) {
/* Max vertices must be < 16 to fit in 4 bit ids */ /* Max vertices must be < 16 to fit in 4 bit ids */
CT_ASSERT(ARRAY_COUNT(shape0->points) <= 16); CT_ASSERT(countof(shape0->points) <= 16);
struct collider_menkowski_feature f = epa_res.closest_feature; struct collider_menkowski_feature f = epa_res.closest_feature;

View File

@ -235,6 +235,9 @@ void __asan_unpoison_memory_region(void const volatile *add, size_t);
#define NS_FROM_SECONDS(s) ((i64)((s) * 1000000000.0)) #define NS_FROM_SECONDS(s) ((i64)((s) * 1000000000.0))
#define SECONDS_FROM_NS(ns) ((f64)(ns) / 1000000000.0) #define SECONDS_FROM_NS(ns) ((f64)(ns) / 1000000000.0)
/* countof */
#define countof(a) (sizeof(a) / sizeof((a)[0]))
/* typeof */ /* typeof */
#if COMPILER_MSVC #if COMPILER_MSVC
/* Typeof not supported in MSVC */ /* Typeof not supported in MSVC */
@ -252,7 +255,14 @@ void __asan_unpoison_memory_region(void const volatile *add, size_t);
# define alignof(type) __alignof(type) # define alignof(type) __alignof(type)
#endif #endif
#define ARRAY_COUNT(a) (sizeof(a) / sizeof((a)[0])) /* alignas */
#if (LANGUAGE_C && (__STDC_VERSION__ < 202311L))
# if COMPILER_MSVC
# define alignas(n) __declspec(align(n))
# else
# define alignas(n) __attribute__((aligned(n)))
# endif
#endif
/* Field macros */ /* Field macros */
#define FIELD_SIZEOF(type, field) sizeof(((type *)0)->field) #define FIELD_SIZEOF(type, field) sizeof(((type *)0)->field)

View File

@ -189,7 +189,7 @@ void draw_circle_line(struct gp_flow *flow, struct v2 pos, f32 radius, f32 thick
void draw_quad_line(struct gp_flow *flow, struct quad quad, f32 thickness, u32 color) void draw_quad_line(struct gp_flow *flow, struct quad quad, f32 thickness, u32 color)
{ {
struct v2 points[] = { quad.p0, quad.p1, quad.p2, quad.p3 }; struct v2 points[] = { quad.p0, quad.p1, quad.p2, quad.p3 };
struct v2_array a = { .points = points, .count = ARRAY_COUNT(points) }; struct v2_array a = { .points = points, .count = countof(points) };
draw_poly_line(flow, a, true, thickness, color); draw_poly_line(flow, a, true, thickness, color);
} }
@ -215,7 +215,7 @@ void draw_arrow_line(struct gp_flow *flow, struct v2 start, struct v2 end, f32 t
struct v2 head_points[] = { end, head_p1, head_p2 }; struct v2 head_points[] = { end, head_p1, head_p2 };
struct v2_array head_points_v2_array = { struct v2_array head_points_v2_array = {
.points = head_points, .points = head_points,
.count = ARRAY_COUNT(head_points) .count = countof(head_points)
}; };
draw_poly(flow, head_points_v2_array, color); draw_poly(flow, head_points_v2_array, color);

View File

@ -103,7 +103,7 @@ INTERNAL JOB_DEF(font_load_asset_job, job)
i64 start_ns = sys_time_ns(); i64 start_ns = sys_time_ns();
ASSERT(string_ends_with(path, LIT(".ttf"))); ASSERT(string_ends_with(path, LIT(".ttf")));
ASSERT(ARRAY_COUNT(g_font_codes) < LOOKUP_TABLE_SIZE); ASSERT(countof(g_font_codes) < LOOKUP_TABLE_SIZE);
/* Decode */ /* Decode */
struct resource res = resource_open(path); struct resource res = resource_open(path);
@ -113,7 +113,7 @@ INTERNAL JOB_DEF(font_load_asset_job, job)
LIT("Font \"%F\" not found"), LIT("Font \"%F\" not found"),
FMT_STR(path))); FMT_STR(path)));
} }
struct ttf_decode_result result = ttf_decode(scratch.arena, resource_get_data(&res), point_size, g_font_codes, ARRAY_COUNT(g_font_codes)); struct ttf_decode_result result = ttf_decode(scratch.arena, resource_get_data(&res), point_size, g_font_codes, countof(g_font_codes));
resource_close(&res); resource_close(&res);
/* Send texture to GPU */ /* Send texture to GPU */
@ -147,7 +147,7 @@ INTERNAL JOB_DEF(font_load_asset_job, job)
MEMCPY(font->glyphs, result.glyphs, sizeof(*font->glyphs) * result.glyphs_count); MEMCPY(font->glyphs, result.glyphs, sizeof(*font->glyphs) * result.glyphs_count);
/* Build lookup table */ /* Build lookup table */
for (u64 i = 0; i < ARRAY_COUNT(g_font_codes); ++i) { for (u64 i = 0; i < countof(g_font_codes); ++i) {
u32 codepoint = g_font_codes[i]; u32 codepoint = g_font_codes[i];
font->lookup[codepoint] = result.cache_indices[i]; font->lookup[codepoint] = result.cache_indices[i];
} }

View File

@ -331,7 +331,7 @@ struct gp_startup_receipt gp_startup(struct work_startup_receipt *work_sr)
NULL, NULL,
flags, flags,
levels, levels,
ARRAY_COUNT(levels), countof(levels),
D3D11_SDK_VERSION, D3D11_SDK_VERSION,
&device, &device,
NULL, NULL,
@ -626,7 +626,7 @@ INTERNAL void init_shader_table(void)
}; };
#if RESOURCE_RELOADING #if RESOURCE_RELOADING
for (u64 i = 0; i < ARRAY_COUNT(G.shader_info); ++i) { for (u64 i = 0; i < countof(G.shader_info); ++i) {
struct dx11_shader_desc *desc = &G.shader_info[i]; struct dx11_shader_desc *desc = &G.shader_info[i];
desc->includes_arena = arena_alloc(MEGABYTE(8)); desc->includes_arena = arena_alloc(MEGABYTE(8));
desc->includes_mutex = sys_mutex_alloc(); desc->includes_mutex = sys_mutex_alloc();
@ -824,7 +824,7 @@ INTERNAL struct string shader_alloc(struct arena *arena, struct dx11_shader *sha
if (success && !error_blob) { if (success && !error_blob) {
/* Get number of device layout elements from NULL terminated array */ /* Get number of device layout elements from NULL terminated array */
u32 elem_count = 0; u32 elem_count = 0;
for (; elem_count < ARRAY_COUNT(shader_desc->input_layout_desc); ++elem_count) { for (; elem_count < countof(shader_desc->input_layout_desc); ++elem_count) {
const D3D11_INPUT_ELEMENT_DESC *d = &shader_desc->input_layout_desc[elem_count]; const D3D11_INPUT_ELEMENT_DESC *d = &shader_desc->input_layout_desc[elem_count];
if (d->SemanticName == NULL) { if (d->SemanticName == NULL) {
break; break;
@ -960,7 +960,7 @@ INTERNAL enum DXGI_FORMAT dx11_format_from_gp_format(enum gp_texture_format gp_f
[GP_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB] = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB [GP_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB] = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB
}; };
enum DXGI_FORMAT res = DXGI_FORMAT_UNKNOWN; enum DXGI_FORMAT res = DXGI_FORMAT_UNKNOWN;
if ((u32)gp_format < ARRAY_COUNT(dx11_formats)) { if ((u32)gp_format < countof(dx11_formats)) {
res = dx11_formats[gp_format]; res = dx11_formats[gp_format];
} }
return res; return res;
@ -973,7 +973,7 @@ INTERNAL u32 pixel_size_from_dx11_format(enum DXGI_FORMAT dx11_format)
[DXGI_FORMAT_R8G8B8A8_UNORM_SRGB] = 4 [DXGI_FORMAT_R8G8B8A8_UNORM_SRGB] = 4
}; };
u32 res = 0; u32 res = 0;
if ((u32)dx11_format < ARRAY_COUNT(pixel_sizes)) { if ((u32)dx11_format < countof(pixel_sizes)) {
res = pixel_sizes[dx11_format]; res = pixel_sizes[dx11_format];
} }
return res; return res;
@ -1647,7 +1647,7 @@ void gp_dispatch(struct gp_dispatch_params params)
/* Bind RTVs */ /* Bind RTVs */
ID3D11RenderTargetView *rtvs[] = { final_tex->rtv }; ID3D11RenderTargetView *rtvs[] = { final_tex->rtv };
ID3D11DeviceContext_OMSetRenderTargets(G.devcon, ARRAY_COUNT(rtvs), rtvs, NULL); ID3D11DeviceContext_OMSetRenderTargets(G.devcon, countof(rtvs), rtvs, NULL);
/* Draw */ /* Draw */
ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
@ -1690,12 +1690,12 @@ void gp_dispatch(struct gp_dispatch_params params)
/* Bind SRVs */ /* Bind SRVs */
ID3D11ShaderResourceView *srvs[] = { instance_buffer->srv }; ID3D11ShaderResourceView *srvs[] = { instance_buffer->srv };
ID3D11DeviceContext_VSSetShaderResources(G.devcon, 0, ARRAY_COUNT(srvs), srvs); ID3D11DeviceContext_VSSetShaderResources(G.devcon, 0, countof(srvs), srvs);
ID3D11DeviceContext_PSSetShaderResources(G.devcon, 0, ARRAY_COUNT(srvs), srvs); ID3D11DeviceContext_PSSetShaderResources(G.devcon, 0, countof(srvs), srvs);
/* Bind RTVs */ /* Bind RTVs */
ID3D11RenderTargetView *rtvs[] = { final_tex->rtv }; ID3D11RenderTargetView *rtvs[] = { final_tex->rtv };
ID3D11DeviceContext_OMSetRenderTargets(G.devcon, ARRAY_COUNT(rtvs), rtvs, NULL); ID3D11DeviceContext_OMSetRenderTargets(G.devcon, countof(rtvs), rtvs, NULL);
/* Draw */ /* Draw */
ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
@ -1747,12 +1747,12 @@ void gp_dispatch(struct gp_dispatch_params params)
/* Bind SRVs */ /* Bind SRVs */
ID3D11ShaderResourceView *srvs[] = { instance_buffer->srv, texture->srv }; ID3D11ShaderResourceView *srvs[] = { instance_buffer->srv, texture->srv };
ID3D11DeviceContext_VSSetShaderResources(G.devcon, 0, ARRAY_COUNT(srvs), srvs); ID3D11DeviceContext_VSSetShaderResources(G.devcon, 0, countof(srvs), srvs);
ID3D11DeviceContext_PSSetShaderResources(G.devcon, 0, ARRAY_COUNT(srvs), srvs); ID3D11DeviceContext_PSSetShaderResources(G.devcon, 0, countof(srvs), srvs);
/* Bind RTVs */ /* Bind RTVs */
ID3D11RenderTargetView *rtvs[] = { final_tex->rtv }; ID3D11RenderTargetView *rtvs[] = { final_tex->rtv };
ID3D11DeviceContext_OMSetRenderTargets(G.devcon, ARRAY_COUNT(rtvs), rtvs, NULL); ID3D11DeviceContext_OMSetRenderTargets(G.devcon, countof(rtvs), rtvs, NULL);
/* Draw */ /* Draw */
ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
@ -1796,12 +1796,12 @@ void gp_dispatch(struct gp_dispatch_params params)
/* Bind SRVs */ /* Bind SRVs */
ID3D11ShaderResourceView *srvs[] = { instance_buffer->srv }; ID3D11ShaderResourceView *srvs[] = { instance_buffer->srv };
ID3D11DeviceContext_VSSetShaderResources(G.devcon, 0, ARRAY_COUNT(srvs), srvs); ID3D11DeviceContext_VSSetShaderResources(G.devcon, 0, countof(srvs), srvs);
ID3D11DeviceContext_PSSetShaderResources(G.devcon, 0, ARRAY_COUNT(srvs), srvs); ID3D11DeviceContext_PSSetShaderResources(G.devcon, 0, countof(srvs), srvs);
/* Bind RTVs */ /* Bind RTVs */
ID3D11RenderTargetView *rtvs[] = { final_tex->rtv }; ID3D11RenderTargetView *rtvs[] = { final_tex->rtv };
ID3D11DeviceContext_OMSetRenderTargets(G.devcon, ARRAY_COUNT(rtvs), rtvs, NULL); ID3D11DeviceContext_OMSetRenderTargets(G.devcon, countof(rtvs), rtvs, NULL);
/* Draw */ /* Draw */
ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST); ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
@ -2051,7 +2051,7 @@ INTERNAL void capture_image_for_profiler(void)
ID3D11Texture2D_Release(backbuffer); ID3D11Texture2D_Release(backbuffer);
++cap_index; ++cap_index;
if (cap_index >= ARRAY_COUNT(staging_caps)) { if (cap_index >= countof(staging_caps)) {
cap_index = 0; cap_index = 0;
ready_to_read = true; ready_to_read = true;
} }
@ -2082,7 +2082,7 @@ INTERNAL void capture_image_for_profiler(void)
{ {
__profscope(prof_frame_image); __profscope(prof_frame_image);
__profframeimage(dest, (u16)final_width, (u16)final_height, ARRAY_COUNT(staging_caps) - 1, false); __profframeimage(dest, (u16)final_width, (u16)final_height, countof(staging_caps) - 1, false);
} }
scratch_end(scratch); scratch_end(scratch);

View File

@ -402,7 +402,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(gp_shutdown)
#if 0 #if 0
/* Release objects to make live object reporting less noisy */ /* Release objects to make live object reporting less noisy */
//IDXGISwapChain3_Release(G.swapchain); //IDXGISwapChain3_Release(G.swapchain);
for (u32 i = 0; i < ARRAY_COUNT(G.command_queues); ++i) { for (u32 i = 0; i < countof(G.command_queues); ++i) {
struct command_queue *cq = G.command_queues[i]; struct command_queue *cq = G.command_queues[i];
cmomand_queue_release(cq); cmomand_queue_release(cq);
} }
@ -701,7 +701,7 @@ INTERNAL HRESULT dx12_include_open(ID3DInclude *d3d_handler, D3D_INCLUDE_TYPE in
struct string name = string_from_cstr_no_limit((char *)name_cstr); struct string name = string_from_cstr_no_limit((char *)name_cstr);
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
if (handler->num_open_resources >= ARRAY_COUNT(handler->open_resources)) { if (handler->num_open_resources >= countof(handler->open_resources)) {
sys_panic(LIT("Dx12 include handler resource overflow")); sys_panic(LIT("Dx12 include handler resource overflow"));
} }
@ -929,7 +929,7 @@ INTERNAL JOB_DEF(pipeline_init_job, job)
if (success) { if (success) {
struct shader_compile_job_param *params[] = { &vs, &ps }; struct shader_compile_job_param *params[] = { &vs, &ps };
struct shader_compile_job_sig comp_sig = { .params = params }; struct shader_compile_job_sig comp_sig = { .params = params };
job_dispatch_wait(ARRAY_COUNT(params), shader_compile_job, &comp_sig); job_dispatch_wait(countof(params), shader_compile_job, &comp_sig);
success = vs.success && ps.success; success = vs.success && ps.success;
} }
@ -1005,7 +1005,7 @@ INTERNAL JOB_DEF(pipeline_init_job, job)
/* Input layout */ /* Input layout */
u32 num_input_layout_elements = 0; u32 num_input_layout_elements = 0;
for (u32 i = 0; i < ARRAY_COUNT(desc->ia); ++i) { for (u32 i = 0; i < countof(desc->ia); ++i) {
if (desc->ia[i].SemanticName == NULL) { if (desc->ia[i].SemanticName == NULL) {
break; break;
} }
@ -1358,7 +1358,7 @@ INTERNAL struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRI
u32 num_descriptors = 0; u32 num_descriptors = 0;
u32 descriptor_size = 0; u32 descriptor_size = 0;
if (type < (i32)ARRAY_COUNT(G.desc_counts) && type < (i32)ARRAY_COUNT(G.desc_sizes)) { if (type < (i32)countof(G.desc_counts) && type < (i32)countof(G.desc_sizes)) {
num_descriptors = G.desc_counts[type]; num_descriptors = G.desc_counts[type];
descriptor_size = G.desc_sizes[type]; descriptor_size = G.desc_sizes[type];
} }
@ -1539,10 +1539,10 @@ INTERNAL void fenced_release(void *data, enum fenced_release_kind kind)
fr.kind = kind; fr.kind = kind;
fr.ptr = data; fr.ptr = data;
u64 fr_targets[ARRAY_COUNT(G.fenced_release_targets)] = ZI; u64 fr_targets[countof(G.fenced_release_targets)] = ZI;
/* Read current fence target values from command queues */ /* Read current fence target values from command queues */
for (u32 i = 0; i < ARRAY_COUNT(G.command_queues); ++i) { for (u32 i = 0; i < countof(G.command_queues); ++i) {
struct command_queue *cq = G.command_queues[i]; struct command_queue *cq = G.command_queues[i];
struct sys_lock lock = sys_mutex_lock_s(cq->submit_fence_mutex); struct sys_lock lock = sys_mutex_lock_s(cq->submit_fence_mutex);
fr_targets[i] = cq->submit_fence_target; fr_targets[i] = cq->submit_fence_target;
@ -2199,7 +2199,7 @@ struct gp_resource *gp_texture_alloc(enum gp_texture_format format, u32 flags, s
DXGI_FORMAT dxgi_format = ZI; DXGI_FORMAT dxgi_format = ZI;
u32 pixel_size = 0; u32 pixel_size = 0;
if (format < (i32)ARRAY_COUNT(formats)) { if (format < (i32)countof(formats)) {
dxgi_format = formats[format].format; dxgi_format = formats[format].format;
pixel_size = formats[format].size; pixel_size = formats[format].size;
ASSERT(dxgi_format != 0); ASSERT(dxgi_format != 0);
@ -2477,7 +2477,7 @@ void gp_dispatch(struct gp_dispatch_params params)
/* Set descriptor heap */ /* Set descriptor heap */
ID3D12DescriptorHeap *heaps[] = { descriptor_heap->heap }; ID3D12DescriptorHeap *heaps[] = { descriptor_heap->heap };
ID3D12GraphicsCommandList_SetDescriptorHeaps(cl->cl, ARRAY_COUNT(heaps), heaps); ID3D12GraphicsCommandList_SetDescriptorHeaps(cl->cl, countof(heaps), heaps);
ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 3, descriptor_heap->gpu_handle); ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 3, descriptor_heap->gpu_handle);
/* Setup Rasterizer State */ /* Setup Rasterizer State */
@ -2603,7 +2603,7 @@ INTERNAL struct swapchain_buffer *update_swapchain(struct swapchain *swapchain,
} }
/* Release buffers */ /* Release buffers */
for (u32 i = 0; i < ARRAY_COUNT(swapchain->buffers); ++i) { for (u32 i = 0; i < countof(swapchain->buffers); ++i) {
struct swapchain_buffer *sb = &swapchain->buffers[i]; struct swapchain_buffer *sb = &swapchain->buffers[i];
descriptor_release(sb->rtv_descriptor); descriptor_release(sb->rtv_descriptor);
ID3D12Resource_Release(sb->resource); ID3D12Resource_Release(sb->resource);
@ -2653,7 +2653,7 @@ INTERNAL struct swapchain_buffer *update_swapchain(struct swapchain *swapchain,
} }
/* Allocate swapchain resources */ /* Allocate swapchain resources */
for (u32 i = 0; i < ARRAY_COUNT(swapchain->buffers); ++i) { for (u32 i = 0; i < countof(swapchain->buffers); ++i) {
ID3D12Resource *resource = NULL; ID3D12Resource *resource = NULL;
hr = IDXGISwapChain3_GetBuffer(swapchain->swapchain, i, &IID_ID3D12Resource, (void **)&resource); hr = IDXGISwapChain3_GetBuffer(swapchain->swapchain, i, &IID_ID3D12Resource, (void **)&resource);
if (FAILED(hr)) { if (FAILED(hr)) {
@ -2740,7 +2740,7 @@ INTERNAL void present_blit(struct swapchain_buffer *dst, struct dx12_resource *s
/* Set descriptor heap */ /* Set descriptor heap */
ID3D12DescriptorHeap *heaps[] = { descriptor_heap->heap }; ID3D12DescriptorHeap *heaps[] = { descriptor_heap->heap };
ID3D12GraphicsCommandList_SetDescriptorHeaps(cl->cl, ARRAY_COUNT(heaps), heaps); ID3D12GraphicsCommandList_SetDescriptorHeaps(cl->cl, countof(heaps), heaps);
ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 1, descriptor_heap->gpu_handle); ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 1, descriptor_heap->gpu_handle);
/* Setup Rasterizer State */ /* Setup Rasterizer State */
@ -2810,7 +2810,7 @@ void gp_present(struct sys_window *window, struct v2i32 backresolution, struct g
__profscope(Mark queue frames); __profscope(Mark queue frames);
/* Lock because frame marks shouldn't occur while command lists are recording */ /* Lock because frame marks shouldn't occur while command lists are recording */
struct sys_lock lock = sys_mutex_lock_e(G.global_command_list_record_mutex); struct sys_lock lock = sys_mutex_lock_e(G.global_command_list_record_mutex);
for (u32 i = 0; i < ARRAY_COUNT(G.command_queues); ++i) { for (u32 i = 0; i < countof(G.command_queues); ++i) {
struct command_queue *cq = G.command_queues[i]; struct command_queue *cq = G.command_queues[i];
__prof_dx12_new_frame(cq->prof); __prof_dx12_new_frame(cq->prof);
} }
@ -2818,7 +2818,7 @@ void gp_present(struct sys_window *window, struct v2i32 backresolution, struct g
} }
{ {
__profscope(Collect queues); __profscope(Collect queues);
for (u32 i = 0; i < ARRAY_COUNT(G.command_queues); ++i) { for (u32 i = 0; i < countof(G.command_queues); ++i) {
struct command_queue *cq = G.command_queues[i]; struct command_queue *cq = G.command_queues[i];
__prof_dx12_collect(cq->prof); __prof_dx12_collect(cq->prof);
} }
@ -2849,7 +2849,7 @@ INTERNAL SYS_THREAD_DEF(evictor_thread_entry_point, arg)
{ {
__profscope(Run); __profscope(Run);
u64 targets[ARRAY_COUNT(completed_targets)] = ZI; u64 targets[countof(completed_targets)] = ZI;
/* Copy queued data */ /* Copy queued data */
u32 num_fenced_releases = 0; u32 num_fenced_releases = 0;
@ -2868,7 +2868,7 @@ INTERNAL SYS_THREAD_DEF(evictor_thread_entry_point, arg)
/* Wait until fences reach target */ /* Wait until fences reach target */
{ {
__profscope(Check fences); __profscope(Check fences);
for (u32 i = 0; i < ARRAY_COUNT(targets) && !shutdown; ++i) { for (u32 i = 0; i < countof(targets) && !shutdown; ++i) {
while (completed_targets[i] < targets[i] && !shutdown) { while (completed_targets[i] < targets[i] && !shutdown) {
struct command_queue *cq = G.command_queues[i]; struct command_queue *cq = G.command_queues[i];
completed_targets[i] = ID3D12Fence_GetCompletedValue(cq->submit_fence); completed_targets[i] = ID3D12Fence_GetCompletedValue(cq->submit_fence);

View File

@ -38,14 +38,24 @@ struct worker_job_queue {
struct worker_job *last; struct worker_job *last;
}; };
struct alignas(64) worker_ctx {
/* 4 bytes */
i32 worker_id; /* Will be -1 if thread is not a worker */
/* 4 bytes */
i32 pin_depth;
/* 4 bytes */
b32 initialized;
/* 52 bytes */
u8 padding[52];
};
/* One ctx per cache line (prevent false sharing) */
CT_ASSERT(alignof(struct worker_ctx) == 64);
CT_ASSERT(sizeof(struct worker_ctx) == 64);
/* ========================== * /* ========================== *
* Global state * Global state
* ========================== */ * ========================== */
struct worker_info {
i32 id;
};
GLOBAL struct { GLOBAL struct {
struct arena *arena; struct arena *arena;
@ -64,6 +74,9 @@ GLOBAL struct {
i32 num_worker_threads; i32 num_worker_threads;
b32 workers_shutdown; b32 workers_shutdown;
struct sys_thread *worker_threads[JOB_MAX_WORKERS]; struct sys_thread *worker_threads[JOB_MAX_WORKERS];
/* Fiber local storage */
struct worker_ctx worker_contexts[SYS_MAX_FIBERS];
} G = ZI, DEBUG_ALIAS(G, G_job); } G = ZI, DEBUG_ALIAS(G, G_job);
/* ========================== * /* ========================== *
@ -135,19 +148,16 @@ INTERNAL void atomic_unlock(void)
* Worker TLS * Worker TLS
* ========================== */ * ========================== */
struct worker_ctx { INTERNAL struct worker_ctx *worker_ctx_from_fiber_id(i32 fiber_id)
i32 worker_id; /* Will be -1 if thread is not a worker */
i32 pin_depth;
};
INTERNAL THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(worker_ctx_init, vctx)
{ {
struct worker_ctx *ctx = vctx; struct worker_ctx *ctx = &G.worker_contexts[fiber_id];
if (!ctx->initialized) {
ctx->initialized = 1;
ctx->worker_id = -1; ctx->worker_id = -1;
}
return ctx;
} }
GLOBAL THREAD_LOCAL_VAR_DEF(tl_worker_ctx, struct worker_ctx, worker_ctx_init, NULL);
/* ========================== * /* ========================== *
* Job * Job
* ========================== */ * ========================== */
@ -162,7 +172,7 @@ struct job_desc {
INTERNAL struct job_handle job_dispatch_ex(struct job_desc desc) INTERNAL struct job_handle job_dispatch_ex(struct job_desc desc)
{ {
struct worker_ctx *ctx = thread_local_var_fetch(&tl_worker_ctx); struct worker_ctx *ctx = worker_ctx_from_fiber_id(sys_current_fiber_id());
i32 worker_id = ctx->worker_id; i32 worker_id = ctx->worker_id;
job_func *job_func = desc.func; job_func *job_func = desc.func;
@ -406,7 +416,7 @@ void job_wait(struct job_handle handle)
if (job && handle.gen) { if (job && handle.gen) {
b32 is_done = atomic_u64_fetch(&job->gen) != handle.gen; b32 is_done = atomic_u64_fetch(&job->gen) != handle.gen;
if (!is_done) { if (!is_done) {
struct worker_ctx *ctx = thread_local_var_fetch(&tl_worker_ctx); struct worker_ctx *ctx = worker_ctx_from_fiber_id(sys_current_fiber_id());
i32 worker_id = ctx->worker_id; i32 worker_id = ctx->worker_id;
i32 job_pinned_worker = job->pinned_worker_id; i32 job_pinned_worker = job->pinned_worker_id;
if (worker_id >= 0 && (job_pinned_worker < 0 || job_pinned_worker == worker_id)) { if (worker_id >= 0 && (job_pinned_worker < 0 || job_pinned_worker == worker_id)) {
@ -502,7 +512,7 @@ INTERNAL SYS_THREAD_DEF(worker_thread_entry_point, thread_arg)
{ {
i32 worker_id = (i32)(i64)thread_arg; i32 worker_id = (i32)(i64)thread_arg;
struct worker_ctx *ctx = thread_local_var_fetch(&tl_worker_ctx); struct worker_ctx *ctx = worker_ctx_from_fiber_id(sys_current_fiber_id());
ctx->worker_id = worker_id; ctx->worker_id = worker_id;
struct worker_job_queue *queues[] = { &G.pinned_queues[worker_id], &G.global_queue }; struct worker_job_queue *queues[] = { &G.pinned_queues[worker_id], &G.global_queue };
@ -523,7 +533,7 @@ INTERNAL SYS_THREAD_DEF(worker_thread_entry_point, thread_arg)
__profscope(Pick job); __profscope(Pick job);
atomic_lock(); atomic_lock();
{ {
for (i32 queue_index = 0; queue_index < (i32)ARRAY_COUNT(queues); ++queue_index) { for (i32 queue_index = 0; queue_index < (i32)countof(queues); ++queue_index) {
struct worker_job_queue *queue = queues[queue_index]; struct worker_job_queue *queue = queues[queue_index];
struct worker_job *tmp = queue->first; struct worker_job *tmp = queue->first;
while (!job && tmp) { while (!job && tmp) {

View File

@ -92,8 +92,8 @@ void phys_create_and_update_contacts(struct phys_step_ctx *ctx, f32 elapsed_dt,
struct collider_collision_points_result collider_res = collider_collision_points(&e0_collider, &e1_collider, e0_xf, e1_xf); struct collider_collision_points_result collider_res = collider_collision_points(&e0_collider, &e1_collider, e0_xf, e1_xf);
/* Parts of algorithm are hard-coded to support 2 contact points */ /* Parts of algorithm are hard-coded to support 2 contact points */
CT_ASSERT(ARRAY_COUNT(constraint_ent->contact_constraint_data.points) == 2); CT_ASSERT(countof(constraint_ent->contact_constraint_data.points) == 2);
CT_ASSERT(ARRAY_COUNT(collider_res.points) == 2); CT_ASSERT(countof(collider_res.points) == 2);
struct phys_contact_constraint *constraint = NULL; struct phys_contact_constraint *constraint = NULL;
if (collider_res.num_points > 0) { if (collider_res.num_points > 0) {

View File

@ -1,7 +1,6 @@
#include "rand.h" #include "rand.h"
#include "sys.h" #include "sys.h"
#include "memory.h" #include "memory.h"
#include "thread_local.h"
/* TODO: Use a value that gives good precision when dividing into range 0 -> 1 */ /* TODO: Use a value that gives good precision when dividing into range 0 -> 1 */
#define F64_RAND_MAX U64_MAX #define F64_RAND_MAX U64_MAX

View File

@ -103,7 +103,7 @@ struct resource resource_open(struct string name)
return res; return res;
#else #else
struct resource res = ZI; struct resource res = ZI;
if (name.len < ARRAY_COUNT(res._name_text)) { if (name.len < countof(res._name_text)) {
u8 path_text[RESOURCE_NAME_LEN_MAX + (sizeof("res/") - 1)]; u8 path_text[RESOURCE_NAME_LEN_MAX + (sizeof("res/") - 1)];
struct string path = ZI; struct string path = ZI;
{ {
@ -174,7 +174,7 @@ void resource_register_watch_callback(resource_watch_callback *callback)
{ {
struct sys_lock lock = sys_mutex_lock_e(G.watch_callbacks_mutex); struct sys_lock lock = sys_mutex_lock_e(G.watch_callbacks_mutex);
{ {
if (G.num_watch_callbacks < ARRAY_COUNT(G.watch_callbacks)) { if (G.num_watch_callbacks < countof(G.watch_callbacks)) {
G.watch_callbacks[G.num_watch_callbacks++] = callback; G.watch_callbacks[G.num_watch_callbacks++] = callback;
} else { } else {
sys_panic(LIT("Max resource watch callbacks reached")); sys_panic(LIT("Max resource watch callbacks reached"));

View File

@ -1,10 +1,11 @@
#if 0
#include "scratch.h" #include "scratch.h"
INTERNAL THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(scratch_context_alloc, vctx) INTERNAL THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(scratch_context_alloc, vctx)
{ {
__prof; __prof;
struct scratch_ctx *ctx = vctx; struct scratch_ctx *ctx = vctx;
for (u32 i = 0; i < ARRAY_COUNT(ctx->arenas); ++i) { for (u32 i = 0; i < countof(ctx->arenas); ++i) {
ctx->arenas[i] = arena_alloc(SCRATCH_ARENA_RESERVE); ctx->arenas[i] = arena_alloc(SCRATCH_ARENA_RESERVE);
} }
} }
@ -21,9 +22,10 @@ INTERNAL THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(scratch_context_release, vctx)
ASSERT(ctx->scratch_id_stack_count == 0); ASSERT(ctx->scratch_id_stack_count == 0);
#endif #endif
for (u32 i = 0; i < ARRAY_COUNT(ctx->arenas); ++i) { for (u32 i = 0; i < countof(ctx->arenas); ++i) {
arena_release(ctx->arenas[i]); arena_release(ctx->arenas[i]);
} }
} }
THREAD_LOCAL_VAR_DEF_EXTERN(tl_scratch_ctx, struct scratch_ctx, &scratch_context_alloc, &scratch_context_release); THREAD_LOCAL_VAR_DEF_EXTERN(tl_scratch_ctx, struct scratch_ctx, &scratch_context_alloc, &scratch_context_release);
#endif

View File

@ -1,3 +1,83 @@
#if 1
#ifndef SCRATCH_H
#define SCRATCH_H
#include "arena.h"
#include "sys.h"
/* ========================== *
* Scratch begin
* ========================== */
/* Any parameterized arenas in the caller's scope should be passed into this
* function as a potential "conflict". This is to prevent friction in case the
* passed arena is itself a scratch arena from another scope (since
* parameterized arenas are often used to allocate persistent results for the
* caller).
*
* Use `scratch_begin_no_conflict` instead if there is no arena in the current
* scope that could potentially be a scratch arena from another scope. */
#define scratch_begin(potential_conflict) _scratch_begin(potential_conflict)
INLINE struct arena_temp _scratch_begin(struct arena *potential_conflict)
{
/* This function is currently hard-coded to support 2 scratch arenas */
CT_ASSERT(SYS_SCRATCH_ARENAS_PER_FIBER == 2);
/* Use `scratch_begin_no_conflict` if no conflicts are present */
ASSERT(potential_conflict != NULL);
struct sys_scratch_ctx *ctx = sys_scratch_ctx_from_fiber_id(sys_current_fiber_id());
struct arena *scratch_arena = ctx->arenas[0];
if (potential_conflict && scratch_arena == potential_conflict) {
scratch_arena = ctx->arenas[1];
}
struct arena_temp temp = arena_temp_begin(scratch_arena);
return temp;
}
/* This macro declares an unused "arena" variable that will error if an existing "arena"
* variable is present (due to shadowing). This is for catching obvious cases of
* `scratch_begin_no_conflict` getting called when an `arena` variable already
* exists in the caller's scope (`scratch_begin(arena)` should be called
* instead). */
#define scratch_begin_no_conflict() \
_scratch_begin_no_conflict(); \
do { \
u8 arena = 0; \
(UNUSED)arena; \
} while (0)
INLINE struct arena_temp _scratch_begin_no_conflict(void)
{
struct sys_scratch_ctx *ctx = sys_scratch_ctx_from_fiber_id(sys_current_fiber_id());
struct arena *scratch_arena = ctx->arenas[0];
struct arena_temp temp = arena_temp_begin(scratch_arena);
return temp;
}
/* ========================== *
* Scratch end
* ========================== */
INLINE void scratch_end(struct arena_temp scratch_temp)
{
arena_temp_end(scratch_temp);
}
#endif
#else
#ifndef SCRATCH_H #ifndef SCRATCH_H
#define SCRATCH_H #define SCRATCH_H
@ -5,7 +85,7 @@
#include "sys.h" #include "sys.h"
#include "thread_local.h" #include "thread_local.h"
#define SCRATCH_ARENAS_PER_THREAD 2 #define SYS_SCRATCH_ARENAS_PER_FIBER 2
#define SCRATCH_ARENA_RESERVE (GIGABYTE(64)) #define SCRATCH_ARENA_RESERVE (GIGABYTE(64))
/* ========================== * /* ========================== *
@ -13,7 +93,7 @@
* ========================== */ * ========================== */
struct scratch_ctx { struct scratch_ctx {
struct arena *arenas[SCRATCH_ARENAS_PER_THREAD]; struct arena *arenas[SYS_SCRATCH_ARENAS_PER_FIBER];
#if RTC #if RTC
u64 next_scratch_id; u64 next_scratch_id;
@ -31,7 +111,7 @@ THREAD_LOCAL_VAR_DECL_EXTERN(tl_scratch_ctx, struct scratch_ctx);
INLINE void scratch_dbg_push(struct scratch_ctx *ctx, struct arena_temp *temp) INLINE void scratch_dbg_push(struct scratch_ctx *ctx, struct arena_temp *temp)
{ {
#if RTC #if RTC
if (ctx->scratch_id_stack_count >= ARRAY_COUNT(ctx->scratch_id_stack)) { if (ctx->scratch_id_stack_count >= countof(ctx->scratch_id_stack)) {
sys_panic(LIT("Max debug scratch depth reached")); sys_panic(LIT("Max debug scratch depth reached"));
} }
temp->scratch_id = ctx->next_scratch_id++; temp->scratch_id = ctx->next_scratch_id++;
@ -55,7 +135,7 @@ INLINE void scratch_dbg_push(struct scratch_ctx *ctx, struct arena_temp *temp)
INLINE struct arena_temp _scratch_begin(struct arena *potential_conflict) INLINE struct arena_temp _scratch_begin(struct arena *potential_conflict)
{ {
/* This function is currently hard-coded to support 2 scratch arenas */ /* This function is currently hard-coded to support 2 scratch arenas */
CT_ASSERT(SCRATCH_ARENAS_PER_THREAD == 2); CT_ASSERT(SYS_SCRATCH_ARENAS_PER_FIBER == 2);
/* Use `scratch_begin_no_conflict` if no conflicts are present */ /* Use `scratch_begin_no_conflict` if no conflicts are present */
ASSERT(potential_conflict != NULL); ASSERT(potential_conflict != NULL);
@ -114,3 +194,4 @@ INLINE void scratch_end(struct arena_temp scratch_temp)
} }
#endif #endif
#endif

View File

@ -664,7 +664,7 @@ INTERNAL void test_generate_walls(struct sim_snapshot *world)
wall_ent->local_collider.points[1] = v2_sub(end, start); wall_ent->local_collider.points[1] = v2_sub(end, start);
struct v2 dirs[4] = { V2(0, -1), V2(1, 0), V2(0, 1), V2(-1, 0) }; struct v2 dirs[4] = { V2(0, -1), V2(1, 0), V2(0, 1), V2(-1, 0) };
ASSERT(node->wall_dir >= 0 && (u32)node->wall_dir < ARRAY_COUNT(dirs)); ASSERT(node->wall_dir >= 0 && (u32)node->wall_dir < countof(dirs));
wall_ent->collision_dir = dirs[node->wall_dir]; wall_ent->collision_dir = dirs[node->wall_dir];
sim_ent_activate(wall_ent, world->tick); sim_ent_activate(wall_ent, world->tick);

View File

@ -108,7 +108,7 @@ struct sock_address sock_address_from_string(struct string str)
{ {
/* Parse string into ip & port */ /* Parse string into ip & port */
u8 ip_buff[1024]; u8 ip_buff[1024];
u8 port_buff[ARRAY_COUNT(ip_buff)]; u8 port_buff[countof(ip_buff)];
char *ip_cstr = NULL; char *ip_cstr = NULL;
char *port_cstr = NULL; char *port_cstr = NULL;
{ {
@ -121,7 +121,7 @@ struct sock_address sock_address_from_string(struct string str)
} }
u64 ip_len = 0; u64 ip_len = 0;
u64 port_len = 0; u64 port_len = 0;
u64 parse_len = min_u64(min_u64(str.len, ARRAY_COUNT(ip_buff) - 1), ARRAY_COUNT(port_buff) - 1); u64 parse_len = min_u64(min_u64(str.len, countof(ip_buff) - 1), countof(port_buff) - 1);
if (colon_count > 1 && str.text[0] == '[') { if (colon_count > 1 && str.text[0] == '[') {
/* Parse ipv6 with port */ /* Parse ipv6 with port */
b32 parse_addr = true; b32 parse_addr = true;
@ -158,7 +158,7 @@ struct sock_address sock_address_from_string(struct string str)
} }
} else { } else {
/* Copy address without port */ /* Copy address without port */
ip_len = min_u64(str.len, ARRAY_COUNT(ip_buff) - 1); ip_len = min_u64(str.len, countof(ip_buff) - 1);
MEMCPY(ip_buff, str.text, ip_len); MEMCPY(ip_buff, str.text, ip_len);
} }
if (ip_len > 0) { if (ip_len > 0) {
@ -181,9 +181,9 @@ struct sock_address sock_address_from_port(u16 port)
u8 port_buff[128]; u8 port_buff[128];
char *port_cstr = NULL; char *port_cstr = NULL;
{ {
u8 port_buff_reverse[ARRAY_COUNT(port_buff)]; u8 port_buff_reverse[countof(port_buff)];
u64 port_len = 0; u64 port_len = 0;
while (port > 0 && port_len < (ARRAY_COUNT(port_buff) - 1)) { while (port > 0 && port_len < (countof(port_buff) - 1)) {
u8 digit = port % 10; u8 digit = port % 10;
port /= 10; port /= 10;
port_buff_reverse[port_len] = '0' + digit; port_buff_reverse[port_len] = '0' + digit;

View File

@ -331,7 +331,7 @@ INTERNAL void push_load_job(struct cache_ref ref, struct sprite_tag tag)
cmd->ref = scope_ensure_ref_from_ref(cmd->scope, ref)->ref; cmd->ref = scope_ensure_ref_from_ref(cmd->scope, ref)->ref;
cmd->tag = tag; cmd->tag = tag;
{ {
u64 copy_len = min_u64(tag.path.len, ARRAY_COUNT(cmd->tag_path_buff)); u64 copy_len = min_u64(tag.path.len, countof(cmd->tag_path_buff));
cmd->tag.path.text = cmd->tag_path_buff; cmd->tag.path.text = cmd->tag_path_buff;
MEMCPY(cmd->tag.path.text, tag.path.text, copy_len); MEMCPY(cmd->tag.path.text, tag.path.text, copy_len);
} }

View File

@ -39,7 +39,7 @@ struct string string_from_char(struct arena *arena, char c)
struct string string_from_uint(struct arena *arena, u64 n, u64 base, u64 zfill) struct string string_from_uint(struct arena *arena, u64 n, u64 base, u64 zfill)
{ {
/* Base too large */ /* Base too large */
ASSERT(base <= (ARRAY_COUNT(INT_CHARS) - 1)); ASSERT(base <= (countof(INT_CHARS) - 1));
struct arena_temp scratch = scratch_begin(arena); struct arena_temp scratch = scratch_begin(arena);

View File

@ -392,12 +392,6 @@ void sys_condition_variable_wait_time(struct sys_condition_variable *sys_cv, str
void sys_condition_variable_signal(struct sys_condition_variable *sys_cv, u32 count); void sys_condition_variable_signal(struct sys_condition_variable *sys_cv, u32 count);
void sys_condition_variable_broadcast(struct sys_condition_variable *sys_cv); void sys_condition_variable_broadcast(struct sys_condition_variable *sys_cv);
/* ========================== *
* Thread local storage
* ========================== */
struct thread_local_store *sys_thread_get_thread_local_store(void);
/* ========================== * /* ========================== *
* Threads * Threads
* ========================== */ * ========================== */
@ -474,4 +468,58 @@ void sys_sleep(f64 seconds);
b32 sys_run_command(struct string cmd); b32 sys_run_command(struct string cmd);
/* ========================== *
* Fiber
* ========================== */
#define SYS_MAX_FIBERS 65536
i32 sys_current_fiber_id(void);
/* ========================== *
* Scratch
* ========================== */
#define SYS_SCRATCH_ARENAS_PER_FIBER 2
struct sys_scratch_ctx {
struct arena *arenas[SYS_SCRATCH_ARENAS_PER_FIBER];
};
struct sys_scratch_ctx *sys_scratch_ctx_from_fiber_id(i32 fiber_id);
/* ========================== *
* Job
* ========================== */
struct sys_job_data {
i32 id;
void *sig;
};
#define SYS_JOB_DEF(job_name, arg_name) void job_name(struct sys_job_data arg_name)
typedef SYS_JOB_DEF(sys_job_func, job_data);
/* ========================== *
* App entry point
* ========================== */
/* Must be defined by app */
void sys_app_entry(struct string args_str);
#endif #endif

View File

@ -778,7 +778,7 @@ struct sys_watch_info_list sys_watch_wait(struct arena *arena, struct sys_watch
BOOL success = ReadDirectoryChangesW(w32_watch->dir_handle, BOOL success = ReadDirectoryChangesW(w32_watch->dir_handle,
w32_watch->results_buff, w32_watch->results_buff,
ARRAY_COUNT(w32_watch->results_buff), countof(w32_watch->results_buff),
true, true,
filter, filter,
NULL, NULL,
@ -1274,7 +1274,7 @@ INTERNAL LRESULT CALLBACK win32_window_proc(HWND hwnd, UINT msg, WPARAM wparam,
} }
enum sys_btn button = SYS_BTN_NONE; enum sys_btn button = SYS_BTN_NONE;
if (vk_code < ARRAY_COUNT(G.vk_btn_table)) { if (vk_code < countof(G.vk_btn_table)) {
button = G.vk_btn_table[vk_code]; button = G.vk_btn_table[vk_code];
} }
@ -1302,7 +1302,7 @@ INTERNAL LRESULT CALLBACK win32_window_proc(HWND hwnd, UINT msg, WPARAM wparam,
u16 low = utf16_char; u16 low = utf16_char;
if (high) { if (high) {
u16 utf16_pair_bytes[2] = { high, low }; u16 utf16_pair_bytes[2] = { high, low };
struct uni_decode_utf16_result decoded = uni_decode_utf16((struct string16) { .len = ARRAY_COUNT(utf16_pair_bytes), .text = utf16_pair_bytes }); struct uni_decode_utf16_result decoded = uni_decode_utf16((struct string16) { .len = countof(utf16_pair_bytes), .text = utf16_pair_bytes });
if (decoded.advance16 == 2 && decoded.codepoint < U32_MAX) { if (decoded.advance16 == 2 && decoded.codepoint < U32_MAX) {
codepoint = decoded.codepoint; codepoint = decoded.codepoint;
} }
@ -1465,7 +1465,7 @@ void sys_window_register_event_callback(struct sys_window *sys_window, sys_windo
struct win32_window *window = (struct win32_window *)sys_window; struct win32_window *window = (struct win32_window *)sys_window;
struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex); struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex);
{ {
if (window->event_callbacks_count + 1 > ARRAY_COUNT(window->event_callbacks)) { if (window->event_callbacks_count + 1 > countof(window->event_callbacks)) {
sys_panic(LIT("Too many window event callbacks registered")); sys_panic(LIT("Too many window event callbacks registered"));
} else { } else {
window->event_callbacks[window->event_callbacks_count++] = func; window->event_callbacks[window->event_callbacks_count++] = func;
@ -1979,13 +1979,13 @@ struct sys_thread *sys_thread_alloc(sys_thread_func *entry_point, void *thread_d
/* Copy thread name to params */ /* Copy thread name to params */
{ {
u64 cstr_len = min_u64((ARRAY_COUNT(t->thread_name_cstr) - 1), thread_name.len); u64 cstr_len = min_u64((countof(t->thread_name_cstr) - 1), thread_name.len);
MEMCPY(t->thread_name_cstr, thread_name.text, cstr_len * sizeof(*t->thread_name_cstr)); MEMCPY(t->thread_name_cstr, thread_name.text, cstr_len * sizeof(*t->thread_name_cstr));
t->thread_name_cstr[cstr_len] = 0; t->thread_name_cstr[cstr_len] = 0;
} }
{ {
struct string16 thread_name16 = string16_from_string(scratch.arena, thread_name); struct string16 thread_name16 = string16_from_string(scratch.arena, thread_name);
u64 wstr_len = min_u64((ARRAY_COUNT(t->thread_name_wstr) - 1), thread_name16.len); u64 wstr_len = min_u64((countof(t->thread_name_wstr) - 1), thread_name16.len);
MEMCPY(t->thread_name_wstr, thread_name16.text, wstr_len * sizeof(*t->thread_name_wstr)); MEMCPY(t->thread_name_wstr, thread_name16.text, wstr_len * sizeof(*t->thread_name_wstr));
t->thread_name_wstr[wstr_len] = 0; t->thread_name_wstr[wstr_len] = 0;
} }
@ -2174,8 +2174,8 @@ void sys_panic(struct string msg)
u64 wstr_len = 0; u64 wstr_len = 0;
wchar_t prefix[] = L"A fatal error has occured and the application needs to exit:\n\n"; wchar_t prefix[] = L"A fatal error has occured and the application needs to exit:\n\n";
MEMCPY(wstr, prefix, min_u64(ARRAY_COUNT(G.panic_wstr), (ARRAY_COUNT(prefix) << 1))); MEMCPY(wstr, prefix, min_u64(countof(G.panic_wstr), (countof(prefix) << 1)));
wstr_len += ARRAY_COUNT(prefix) - 1; wstr_len += countof(prefix) - 1;
/* Perform manual string encode to avoid any implicit memory /* Perform manual string encode to avoid any implicit memory
* allocation (in case allocation is unreliable) */ * allocation (in case allocation is unreliable) */
@ -2186,7 +2186,7 @@ void sys_panic(struct string msg)
struct uni_decode_utf8_result decoded = uni_decode_utf8(str8_remaining); struct uni_decode_utf8_result decoded = uni_decode_utf8(str8_remaining);
struct uni_encode_utf16_result encoded = uni_encode_utf16(decoded.codepoint); struct uni_encode_utf16_result encoded = uni_encode_utf16(decoded.codepoint);
u64 wstr_new_len = wstr_len + encoded.count16; u64 wstr_new_len = wstr_len + encoded.count16;
if (wstr_new_len < (ARRAY_COUNT(G.panic_wstr) - 1)) { if (wstr_new_len < (countof(G.panic_wstr) - 1)) {
u16 *dest = wstr + wstr_len; u16 *dest = wstr + wstr_len;
MEMCPY(dest, encoded.chars16, (encoded.count16 << 1)); MEMCPY(dest, encoded.chars16, (encoded.count16 << 1));
wstr_len = wstr_new_len; wstr_len = wstr_new_len;
@ -2651,7 +2651,7 @@ INTERNAL SYS_THREAD_DEF(win32_app_thread_entry_point, arg)
{ {
(UNUSED)arg; (UNUSED)arg;
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
struct string cmdline_args = string_from_wstr(scratch.arena, G.cmdline_args_wstr, ARRAY_COUNT(G.cmdline_args_wstr)); struct string cmdline_args = string_from_wstr(scratch.arena, G.cmdline_args_wstr, countof(G.cmdline_args_wstr));
app_entry_point(cmdline_args); app_entry_point(cmdline_args);
scratch_end(scratch); scratch_end(scratch);
} }
@ -2663,7 +2663,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
(UNUSED)cmdline_wstr; (UNUSED)cmdline_wstr;
(UNUSED)show_code; (UNUSED)show_code;
u64 cmdline_len = wstr_len(cmdline_wstr, ARRAY_COUNT(G.cmdline_args_wstr) - 1); u64 cmdline_len = wstr_len(cmdline_wstr, countof(G.cmdline_args_wstr) - 1);
MEMCPY(G.cmdline_args_wstr, cmdline_wstr, cmdline_len * sizeof(*cmdline_wstr)); MEMCPY(G.cmdline_args_wstr, cmdline_wstr, cmdline_len * sizeof(*cmdline_wstr));
G.cmdline_args_wstr[cmdline_len] = 0; G.cmdline_args_wstr[cmdline_len] = 0;
@ -2745,7 +2745,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Use first icon resource as window icon (same as explorer) */ /* Use first icon resource as window icon (same as explorer) */
wchar_t path[4096] = ZI; wchar_t path[4096] = ZI;
GetModuleFileNameW(instance, path, ARRAY_COUNT(path)); GetModuleFileNameW(instance, path, countof(path));
ExtractIconExW(path, 0, &wc->hIcon, &wc->hIconSm, 1); ExtractIconExW(path, 0, &wc->hIcon, &wc->hIconSm, 1);
if (!RegisterClassExW(wc)) { if (!RegisterClassExW(wc)) {
@ -2798,7 +2798,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
app_thread_handle, app_thread_handle,
G.panic_event G.panic_event
}; };
DWORD res = WaitForMultipleObjects(ARRAY_COUNT(wait_handles), wait_handles, false, INFINITE); DWORD res = WaitForMultipleObjects(countof(wait_handles), wait_handles, false, INFINITE);
if (res == WAIT_OBJECT_0) { if (res == WAIT_OBJECT_0) {
sys_thread_force_release(app_thread); sys_thread_force_release(app_thread);
} }
@ -2816,7 +2816,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
struct string threads_msg = ZI; struct string threads_msg = ZI;
threads_msg.text = arena_push_dry(scratch.arena, u8); threads_msg.text = arena_push_dry(scratch.arena, u8);
for (struct win32_thread *t = G.threads_first; t; t = t->next) { for (struct win32_thread *t = G.threads_first; t; t = t->next) {
struct string name = string_from_cstr(t->thread_name_cstr, ARRAY_COUNT(t->thread_name_cstr)); struct string name = string_from_cstr(t->thread_name_cstr, countof(t->thread_name_cstr));
threads_msg.len += string_format(scratch.arena, LIT(" \"%F\"\n"), FMT_STR(name)).len; threads_msg.len += string_format(scratch.arena, LIT(" \"%F\"\n"), FMT_STR(name)).len;
++num_dangling_threads; ++num_dangling_threads;
} }

View File

@ -10,7 +10,6 @@
#include "log.h" #include "log.h"
#include "math.h" #include "math.h"
#include "util.h" #include "util.h"
#include "thread_local.h"
#include "uni.h" #include "uni.h"
#pragma warning(push, 0) #pragma warning(push, 0)
@ -111,6 +110,27 @@ struct win32_window {
struct win32_window *next_free; struct win32_window *next_free;
}; };
struct alignas(64) fiber_ctx {
i32 id; /* 4 bytes */
u8 pad0[4]; /* 4 bytes */
HANDLE sleep_timer; /* 8 bytes */
struct sys_scratch_ctx scratch_ctx; /* 16 bytes */
u8 pad1[16]; /* 32 bytes */
};
CT_ASSERT(sizeof(struct fiber_ctx) == 64);
CT_ASSERT(alignof(struct fiber_ctx) == 64);
/* ========================== * /* ========================== *
* Global state * Global state
* ========================== */ * ========================== */
@ -161,8 +181,80 @@ GLOBAL struct {
struct sys_mutex *windows_mutex; struct sys_mutex *windows_mutex;
struct arena *windows_arena; struct arena *windows_arena;
struct win32_window *first_free_window; struct win32_window *first_free_window;
/* Fibers */
struct atomic_i32 num_fibers;
struct fiber_ctx fiber_contexts[SYS_MAX_FIBERS];
} G = ZI, DEBUG_ALIAS(G, G_sys_win32); } G = ZI, DEBUG_ALIAS(G, G_sys_win32);
/* ========================== *
* Fibers
* ========================== */
INTERNAL i32 fiber_ctx_init(void)
{
i32 id = atomic_i32_fetch_add(&G.num_fibers, 1);
if (id >= SYS_MAX_FIBERS) {
sys_panic(LIT("Max fibers reached"));
}
struct fiber_ctx *ctx = &G.fiber_contexts[id];
{
ctx->id = id;
ctx->sleep_timer = CreateWaitableTimerExW(NULL, NULL, CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, TIMER_ALL_ACCESS);
}
return id;
}
INTERNAL struct fiber_ctx *fiber_ctx_from_id(i32 id)
{
return &G.fiber_contexts[id];
}
i32 sys_current_fiber_id(void)
{
return (i32)(i64)GetFiberData();
}
/* ========================== *
* Scratch
* ========================== */
struct sys_scratch_ctx *sys_scratch_ctx_from_fiber_id(i32 id)
{
struct fiber_ctx *fiber_ctx = fiber_ctx_from_id(id);
struct sys_scratch_ctx *scratch_ctx = &fiber_ctx->scratch_ctx;
if (!scratch_ctx->arenas[0]) {
__profscope(Initialize scratch ctx);
for (u32 i = 0; i < countof(scratch_ctx->arenas); ++i) {
scratch_ctx->arenas[i] = arena_alloc(GIGABYTE(64));
}
}
return scratch_ctx;
}
/* ========================== * /* ========================== *
* Events * Events
* ========================== */ * ========================== */
@ -778,7 +870,7 @@ struct sys_watch_info_list sys_watch_wait(struct arena *arena, struct sys_watch
BOOL success = ReadDirectoryChangesW(w32_watch->dir_handle, BOOL success = ReadDirectoryChangesW(w32_watch->dir_handle,
w32_watch->results_buff, w32_watch->results_buff,
ARRAY_COUNT(w32_watch->results_buff), countof(w32_watch->results_buff),
true, true,
filter, filter,
NULL, NULL,
@ -1274,7 +1366,7 @@ INTERNAL LRESULT CALLBACK win32_window_proc(HWND hwnd, UINT msg, WPARAM wparam,
} }
enum sys_btn button = SYS_BTN_NONE; enum sys_btn button = SYS_BTN_NONE;
if (vk_code < ARRAY_COUNT(G.vk_btn_table)) { if (vk_code < countof(G.vk_btn_table)) {
button = G.vk_btn_table[vk_code]; button = G.vk_btn_table[vk_code];
} }
@ -1302,7 +1394,7 @@ INTERNAL LRESULT CALLBACK win32_window_proc(HWND hwnd, UINT msg, WPARAM wparam,
u16 low = utf16_char; u16 low = utf16_char;
if (high) { if (high) {
u16 utf16_pair_bytes[2] = { high, low }; u16 utf16_pair_bytes[2] = { high, low };
struct uni_decode_utf16_result decoded = uni_decode_utf16((struct string16) { .len = ARRAY_COUNT(utf16_pair_bytes), .text = utf16_pair_bytes }); struct uni_decode_utf16_result decoded = uni_decode_utf16((struct string16) { .len = countof(utf16_pair_bytes), .text = utf16_pair_bytes });
if (decoded.advance16 == 2 && decoded.codepoint < U32_MAX) { if (decoded.advance16 == 2 && decoded.codepoint < U32_MAX) {
codepoint = decoded.codepoint; codepoint = decoded.codepoint;
} }
@ -1465,7 +1557,7 @@ void sys_window_register_event_callback(struct sys_window *sys_window, sys_windo
struct win32_window *window = (struct win32_window *)sys_window; struct win32_window *window = (struct win32_window *)sys_window;
struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex); struct sys_lock lock = sys_mutex_lock_e(window->event_callbacks_mutex);
{ {
if (window->event_callbacks_count + 1 > ARRAY_COUNT(window->event_callbacks)) { if (window->event_callbacks_count + 1 > countof(window->event_callbacks)) {
sys_panic(LIT("Too many window event callbacks registered")); sys_panic(LIT("Too many window event callbacks registered"));
} else { } else {
window->event_callbacks[window->event_callbacks_count++] = func; window->event_callbacks[window->event_callbacks_count++] = func;
@ -1845,45 +1937,6 @@ void sys_condition_variable_broadcast(struct sys_condition_variable *sys_cv)
WakeAllConditionVariable(&cv->condition_variable); WakeAllConditionVariable(&cv->condition_variable);
} }
/* ========================== *
* Thread local storage
* ========================== */
struct win32_tls {
HANDLE sleep_timer;
struct thread_local_store store;
};
INTERNAL void win32_thread_set_tls(struct win32_tls *ctx)
{
TlsSetValue(G.thread_tls_index, (LPVOID)ctx);
}
INTERNAL struct win32_tls *win32_thread_get_tls(void)
{
return TlsGetValue(G.thread_tls_index);
}
INTERNAL struct win32_tls win32_tls_alloc(void)
{
struct win32_tls tls = ZI;
tls.sleep_timer = CreateWaitableTimerExW(NULL, NULL, CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, TIMER_ALL_ACCESS);
tls.store = thread_local_store_alloc();
return tls;
}
INTERNAL void win32_tls_release(struct win32_tls *tls)
{
thread_local_store_release(&tls->store);
CloseHandle(tls->sleep_timer);
}
struct thread_local_store *sys_thread_get_thread_local_store(void)
{
struct win32_tls *thread_ctx = (struct win32_tls *)win32_thread_get_tls();
return &thread_ctx->store;
}
/* ========================== * /* ========================== *
* Threads * Threads
* ========================== */ * ========================== */
@ -1938,13 +1991,19 @@ INTERNAL DWORD WINAPI win32_thread_proc(LPVOID vt)
struct win32_thread *t = (struct win32_thread *)vt; struct win32_thread *t = (struct win32_thread *)vt;
__profthread(t->thread_name_cstr); __profthread(t->thread_name_cstr);
i32 fiber_id = fiber_ctx_init();
void *fiber_addr = ConvertThreadToFiber((void *)(i64)fiber_id);
(UNUSED)fiber_addr;
{
i32 test = sys_current_fiber_id();
(UNUSED)test;
(UNUSED)fiber_ctx_from_id;
DEBUGBREAKABLE;
}
/* Initialize COM */ /* Initialize COM */
CoInitializeEx(NULL, COINIT_APARTMENTTHREADED); CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
/* Initialize TLS */
struct win32_tls tls = win32_tls_alloc();
win32_thread_set_tls(&tls);
/* Set thread name */ /* Set thread name */
if (t->thread_name_wstr[0] != 0) { if (t->thread_name_wstr[0] != 0) {
SetThreadDescription(GetCurrentThread(), t->thread_name_wstr); SetThreadDescription(GetCurrentThread(), t->thread_name_wstr);
@ -1955,9 +2014,6 @@ INTERNAL DWORD WINAPI win32_thread_proc(LPVOID vt)
/* Enter thread entry point */ /* Enter thread entry point */
t->entry_point(t->thread_data); t->entry_point(t->thread_data);
/* Release TLS */
win32_tls_release(&tls);
/* Uninitialize COM */ /* Uninitialize COM */
CoUninitialize(); CoUninitialize();
@ -1979,13 +2035,13 @@ struct sys_thread *sys_thread_alloc(sys_thread_func *entry_point, void *thread_d
/* Copy thread name to params */ /* Copy thread name to params */
{ {
u64 cstr_len = min_u64((ARRAY_COUNT(t->thread_name_cstr) - 1), thread_name.len); u64 cstr_len = min_u64((countof(t->thread_name_cstr) - 1), thread_name.len);
MEMCPY(t->thread_name_cstr, thread_name.text, cstr_len * sizeof(*t->thread_name_cstr)); MEMCPY(t->thread_name_cstr, thread_name.text, cstr_len * sizeof(*t->thread_name_cstr));
t->thread_name_cstr[cstr_len] = 0; t->thread_name_cstr[cstr_len] = 0;
} }
{ {
struct string16 thread_name16 = string16_from_string(scratch.arena, thread_name); struct string16 thread_name16 = string16_from_string(scratch.arena, thread_name);
u64 wstr_len = min_u64((ARRAY_COUNT(t->thread_name_wstr) - 1), thread_name16.len); u64 wstr_len = min_u64((countof(t->thread_name_wstr) - 1), thread_name16.len);
MEMCPY(t->thread_name_wstr, thread_name16.text, wstr_len * sizeof(*t->thread_name_wstr)); MEMCPY(t->thread_name_wstr, thread_name16.text, wstr_len * sizeof(*t->thread_name_wstr));
t->thread_name_wstr[wstr_len] = 0; t->thread_name_wstr[wstr_len] = 0;
} }
@ -2174,8 +2230,8 @@ void sys_panic(struct string msg)
u64 wstr_len = 0; u64 wstr_len = 0;
wchar_t prefix[] = L"A fatal error has occured and the application needs to exit:\n\n"; wchar_t prefix[] = L"A fatal error has occured and the application needs to exit:\n\n";
MEMCPY(wstr, prefix, min_u64(ARRAY_COUNT(G.panic_wstr), (ARRAY_COUNT(prefix) << 1))); MEMCPY(wstr, prefix, min_u64(countof(G.panic_wstr), (countof(prefix) << 1)));
wstr_len += ARRAY_COUNT(prefix) - 1; wstr_len += countof(prefix) - 1;
/* Perform manual string encode to avoid any implicit memory /* Perform manual string encode to avoid any implicit memory
* allocation (in case allocation is unreliable) */ * allocation (in case allocation is unreliable) */
@ -2186,7 +2242,7 @@ void sys_panic(struct string msg)
struct uni_decode_utf8_result decoded = uni_decode_utf8(str8_remaining); struct uni_decode_utf8_result decoded = uni_decode_utf8(str8_remaining);
struct uni_encode_utf16_result encoded = uni_encode_utf16(decoded.codepoint); struct uni_encode_utf16_result encoded = uni_encode_utf16(decoded.codepoint);
u64 wstr_new_len = wstr_len + encoded.count16; u64 wstr_new_len = wstr_len + encoded.count16;
if (wstr_new_len < (ARRAY_COUNT(G.panic_wstr) - 1)) { if (wstr_new_len < (countof(G.panic_wstr) - 1)) {
u16 *dest = wstr + wstr_len; u16 *dest = wstr + wstr_len;
MEMCPY(dest, encoded.chars16, (encoded.count16 << 1)); MEMCPY(dest, encoded.chars16, (encoded.count16 << 1));
wstr_len = wstr_new_len; wstr_len = wstr_new_len;
@ -2302,7 +2358,8 @@ INTERNAL void win32_precise_sleep_legacy(f64 seconds)
void sys_sleep_precise(f64 seconds) void sys_sleep_precise(f64 seconds)
{ {
__prof; __prof;
HANDLE timer = win32_thread_get_tls()->sleep_timer; struct fiber_ctx *ctx = fiber_ctx_from_id(sys_current_fiber_id());
HANDLE timer = ctx->sleep_timer;
if (timer) { if (timer) {
/* Use newer sleeping method */ /* Use newer sleeping method */
win32_precise_sleep_timer(seconds, timer); win32_precise_sleep_timer(seconds, timer);
@ -2347,8 +2404,8 @@ INTERNAL SYS_THREAD_DEF(win32_app_thread_entry_point, arg)
{ {
(UNUSED)arg; (UNUSED)arg;
struct arena_temp scratch = scratch_begin_no_conflict(); struct arena_temp scratch = scratch_begin_no_conflict();
struct string cmdline_args = string_from_wstr(scratch.arena, G.cmdline_args_wstr, ARRAY_COUNT(G.cmdline_args_wstr)); struct string cmdline_args = string_from_wstr(scratch.arena, G.cmdline_args_wstr, countof(G.cmdline_args_wstr));
app_entry_point(cmdline_args); sys_app_entry(cmdline_args);
scratch_end(scratch); scratch_end(scratch);
} }
@ -2359,7 +2416,11 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
(UNUSED)cmdline_wstr; (UNUSED)cmdline_wstr;
(UNUSED)show_code; (UNUSED)show_code;
u64 cmdline_len = wstr_len(cmdline_wstr, ARRAY_COUNT(G.cmdline_args_wstr) - 1); /* Convert main thread to fiber */
i32 fiber_id = fiber_ctx_init();
ConvertThreadToFiber((void *)(i64)fiber_id);
u64 cmdline_len = wstr_len(cmdline_wstr, countof(G.cmdline_args_wstr) - 1);
MEMCPY(G.cmdline_args_wstr, cmdline_wstr, cmdline_len * sizeof(*cmdline_wstr)); MEMCPY(G.cmdline_args_wstr, cmdline_wstr, cmdline_len * sizeof(*cmdline_wstr));
G.cmdline_args_wstr[cmdline_len] = 0; G.cmdline_args_wstr[cmdline_len] = 0;
@ -2441,7 +2502,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Use first icon resource as window icon (same as explorer) */ /* Use first icon resource as window icon (same as explorer) */
wchar_t path[4096] = ZI; wchar_t path[4096] = ZI;
GetModuleFileNameW(instance, path, ARRAY_COUNT(path)); GetModuleFileNameW(instance, path, countof(path));
ExtractIconExW(path, 0, &wc->hIcon, &wc->hIconSm, 1); ExtractIconExW(path, 0, &wc->hIcon, &wc->hIconSm, 1);
if (!RegisterClassExW(wc)) { if (!RegisterClassExW(wc)) {
@ -2470,10 +2531,6 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
* App thread setup * App thread setup
* ========================== */ * ========================== */
/* Initialize main thread context. */
struct win32_tls main_thread_tls = win32_tls_alloc();
win32_thread_set_tls(&main_thread_tls);
/* Call app thread and wait for return */ /* Call app thread and wait for return */
{ {
/* Start app thread */ /* Start app thread */
@ -2494,7 +2551,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
app_thread_handle, app_thread_handle,
G.panic_event G.panic_event
}; };
DWORD res = WaitForMultipleObjects(ARRAY_COUNT(wait_handles), wait_handles, false, INFINITE); DWORD res = WaitForMultipleObjects(countof(wait_handles), wait_handles, false, INFINITE);
if (res == WAIT_OBJECT_0) { if (res == WAIT_OBJECT_0) {
sys_thread_force_release(app_thread); sys_thread_force_release(app_thread);
} }
@ -2512,7 +2569,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
struct string threads_msg = ZI; struct string threads_msg = ZI;
threads_msg.text = arena_push_dry(scratch.arena, u8); threads_msg.text = arena_push_dry(scratch.arena, u8);
for (struct win32_thread *t = G.threads_first; t; t = t->next) { for (struct win32_thread *t = G.threads_first; t; t = t->next) {
struct string name = string_from_cstr(t->thread_name_cstr, ARRAY_COUNT(t->thread_name_cstr)); struct string name = string_from_cstr(t->thread_name_cstr, countof(t->thread_name_cstr));
threads_msg.len += string_format(scratch.arena, LIT(" \"%F\"\n"), FMT_STR(name)).len; threads_msg.len += string_format(scratch.arena, LIT(" \"%F\"\n"), FMT_STR(name)).len;
++num_dangling_threads; ++num_dangling_threads;
} }
@ -2532,9 +2589,6 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
goto abort; goto abort;
} }
/* Release main thread context */
win32_tls_release(&main_thread_tls);
/* ========================== * /* ========================== *
* Abort * Abort
* ========================== */ * ========================== */

View File

@ -1,106 +0,0 @@
#include "thread_local.h"
#include "sys.h"
#include "arena.h"
#include "atomic.h"
#include "intrinsics.h"
/* Arbitrary limits. Increase if needed. */
#define MAX_THREAD_LOCAL_VARS 256 /* max distinct thread-local vars, process-wide */
#define THREAD_LOCAL_STORE_RESERVE (MEGABYTE(64)) /* arena reserve per thread store */
/* Process-wide registry of thread-local variable descriptors.
 * metas_count/metas are guarded by the metas_lock_flag spinlock
 * (see metas_lock/metas_unlock); per-thread data lives elsewhere,
 * in each thread's thread_local_store. */
GLOBAL struct {
struct atomic_i32 metas_lock_flag; /* spinlock flag: 0 = free, 1 = held */
u64 metas_count; /* number of registered vars; next free id */
struct thread_local_var_meta metas[MAX_THREAD_LOCAL_VARS]; /* registered descriptors, indexed by id */
} G = ZI, DEBUG_ALIAS(G, G_thread_local);
/* Acquire the global meta-table spinlock.
 *
 * NOTE(review): this assumes atomic_i32_fetch_test_set(ptr, test, set) is a
 * compare-and-swap that returns the PREVIOUS value — consistent with the
 * "fetch" naming of atomic_u64_fetch / atomic_i32_fetch_set used elsewhere
 * in this file; confirm against the atomic.h implementation.  Under that
 * reading the original condition (== 0) was inverted: a contending thread
 * read 1, left the loop immediately and proceeded WITHOUT the lock, while
 * the acquiring thread spun one extra redundant iteration.  Spin while the
 * previous value is non-zero, i.e. until we are the thread that performs
 * the 0 -> 1 transition. */
INTERNAL void metas_lock(void)
{
	/* Spinlock */
	while (atomic_i32_fetch_test_set(&G.metas_lock_flag, 0, 1) != 0) {
		ix_pause(); /* CPU hint: reduce power/pipeline cost of the spin */
	}
}
/* Release the global meta-table spinlock by storing 0.
 * NOTE(review): assumes atomic_i32_fetch_set is an atomic exchange with
 * release-or-stronger ordering — confirm against atomic.h. */
INTERNAL void metas_unlock(void)
{
atomic_i32_fetch_set(&G.metas_lock_flag, 0);
}
struct thread_local_store thread_local_store_alloc(void)
{
__prof;
struct thread_local_store t = ZI;
t.arena = arena_alloc(THREAD_LOCAL_STORE_RESERVE);
t.lookup = arena_push_array(t.arena, void *, MAX_THREAD_LOCAL_VARS);
t.allocation_order = arena_push_array(t.arena, u64, MAX_THREAD_LOCAL_VARS);
return t;
}
/* Tear down a thread's storage: run each allocated variable's release
 * callback (newest allocation first), then free the backing arena.
 * Destructors run under the metas lock because they read G.metas. */
void thread_local_store_release(struct thread_local_store *t)
{
	__prof;
	/* Release allocated vars in reverse order */
	metas_lock();
	{
		u64 remaining = t->allocation_order_count;
		while (remaining > 0) {
			--remaining;
			u64 var_id = t->allocation_order[remaining];
			struct thread_local_var_meta *var_meta = &G.metas[var_id];
			if (var_meta->release) {
				var_meta->release(t->lookup[var_id]);
			}
		}
	}
	metas_unlock();
	arena_release(t->arena);
}
/* Return the calling thread's instance of the thread-local variable
 * described by *meta.  Registers the variable globally on first use by any
 * thread, and allocates the per-thread copy on first use by each thread.
 * Backing macro for thread_local_var_fetch(). */
volatile void *_thread_local_var_fetch(struct thread_local_var_meta *meta)
{
/* Register var if unregistered */
u64 id;
{
/* Double-checked locking: read id_plus_one lock-free first;
 * 0 means "not yet registered", otherwise id = id_plus_one - 1. */
u64 id_plus_one = atomic_u64_fetch(&meta->id_plus_one);
if (id_plus_one == 0) {
__profscope(Register thread local var);
metas_lock();
{
id_plus_one = atomic_u64_fetch(&meta->id_plus_one); /* Reevaluate now that we've locked */
if (id_plus_one == 0) {
/* We won the registration race: claim the next id. */
id = G.metas_count++;
if (id >= MAX_THREAD_LOCAL_VARS) {
sys_panic(LIT("Maximum number of thread local variables reached"));
}
/* id_plus_one stays biased by +1 so that 0 can mean "unregistered". */
atomic_u64_fetch_set(&meta->id_plus_one, id + 1);
/* Copy the descriptor into the global table for release-time use. */
G.metas[id] = *meta;
} else {
id = id_plus_one - 1;
}
}
metas_unlock();
} else {
id = id_plus_one - 1;
}
}
struct thread_local_store *t = sys_thread_get_thread_local_store();
void *data = t->lookup[id];
/* Allocate var for thread if unallocated */
if (!data) {
__profscope(Alloc thread local var);
/* Allocate */
arena_align(t->arena, meta->align);
data = arena_push_array_no_zero(t->arena, u8, meta->size);
/* Custom constructor if provided, otherwise zero-init. */
if (meta->alloc) {
meta->alloc(data);
} else {
MEMZERO(data, meta->size);
}
/* Record for lookup and for reverse-order release at thread exit. */
t->lookup[id] = data;
t->allocation_order[t->allocation_order_count++] = id;
}
return data;
}

View File

@ -1,65 +0,0 @@
#ifndef THREAD_LOCAL_H
#define THREAD_LOCAL_H
/* Include guard renamed from THREAD_LOCAL: that exact spelling is commonly
 * defined as a portability alias for _Thread_local / __declspec(thread);
 * a collision would silently empty this header. */

/* ========================== *
 * Thread local store
 * ========================== */

/* Per-thread container holding this thread's copies of every registered
 * thread-local variable.  Each thread owns exactly one. */
struct thread_local_store {
	void **lookup;              /* var id -> this thread's data (NULL until first fetch) */
	struct arena *arena;        /* backing memory for all per-thread copies */
	u64 allocation_order_count; /* vars allocated on this thread so far */
	u64 *allocation_order;      /* var ids in allocation order, for reverse-order release */
};

struct thread_local_store thread_local_store_alloc(void);
void thread_local_store_release(struct thread_local_store *t);

/* ========================== *
 * Thread local var
 * ========================== */

/* Optional per-variable constructor/destructor: alloc runs on a thread's
 * first fetch of the var, release runs during store teardown. */
#define THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(name, arg_name) void name(void *arg_name)
typedef THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(thread_local_var_alloc_func, ptr);
#define THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(name, arg_name) void name(void *arg_name)
typedef THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(thread_local_var_release_func, ptr);

/* Shared (cross-thread) descriptor of one thread-local variable.
 * id_plus_one is 0 while unregistered; ids are assigned lazily on first
 * fetch, hence the +1 bias so zero-init means "unregistered". */
struct thread_local_var_meta {
	struct atomic_u64 id_plus_one;
	u64 size;
	u64 align;
	thread_local_var_alloc_func *alloc;
	thread_local_var_release_func *release;
};

/* Define a thread-local variable.  The never-written `_t` member only
 * records the value type so thread_local_var_fetch can return a typed
 * pointer via typeof. */
#define THREAD_LOCAL_VAR_DEF(var_name, type, alloc_func, release_func) \
	struct { struct thread_local_var_meta meta; type *_t; } var_name = { \
		.meta = { \
			.size = sizeof(type), \
			.align = alignof(type), \
			.alloc = (alloc_func), \
			.release = (release_func) \
		} \
	}
/* NOTE(review): the __thread_local_struct prefix uses a double underscore,
 * which is reserved for the implementation (C11 7.1.3) — consider renaming. */
#define THREAD_LOCAL_VAR_DECL_EXTERN(var_name, type) struct __thread_local_struct##var_name { struct thread_local_var_meta meta; type *_t; }; extern struct __thread_local_struct##var_name var_name
#define THREAD_LOCAL_VAR_DEF_EXTERN(var_name, type, alloc_func, release_func) \
	struct __thread_local_struct##var_name var_name = { \
		.meta = { \
			.size = sizeof(type), \
			.align = alignof(type), \
			.alloc = (alloc_func), \
			.release = (release_func) \
		} \
	}

/* Fetch the calling thread's instance of a var defined with the macros
 * above; returns a typed pointer when typeof is available, void* otherwise. */
#if TYPEOF_DEFINED
# define thread_local_var_fetch(var_ptr) (typeof((var_ptr)->_t))(_thread_local_var_fetch(&(var_ptr)->meta))
#else
# define thread_local_var_fetch(var_ptr) (void *)(_thread_local_var_fetch(&(var_ptr)->meta))
#endif
volatile void *_thread_local_var_fetch(struct thread_local_var_meta *meta);

#endif /* THREAD_LOCAL_H */

View File

@ -381,10 +381,10 @@ INTERNAL struct string get_ent_debug_text(struct arena *arena, struct sim_ent *e
{ {
res.len += string_copy(arena, LIT("props: 0x")).len; res.len += string_copy(arena, LIT("props: 0x")).len;
for (u64 chunk_index = ARRAY_COUNT(ent->props); chunk_index-- > 0;) { for (u64 chunk_index = countof(ent->props); chunk_index-- > 0;) {
u64 chunk = ent->props[chunk_index]; u64 chunk = ent->props[chunk_index];
for (u64 part_index = 8; part_index-- > 0;) { for (u64 part_index = 8; part_index-- > 0;) {
if ((chunk_index != (ARRAY_COUNT(ent->props) - 1)) || ((chunk_index * 64) + (part_index * 8)) <= SEPROP_COUNT) { if ((chunk_index != (countof(ent->props) - 1)) || ((chunk_index * 64) + (part_index * 8)) <= SEPROP_COUNT) {
u8 part = (chunk >> (part_index * 8)) & 0xFF; u8 part = (chunk >> (part_index * 8)) & 0xFF;
string_from_char(arena, hex[(part >> 4) & 0x0F]); string_from_char(arena, hex[(part >> 4) & 0x0F]);
string_from_char(arena, hex[(part >> 0) & 0x0F]); string_from_char(arena, hex[(part >> 0) & 0x0F]);
@ -732,7 +732,7 @@ INTERNAL void user_update(void)
struct sys_event_array events = pop_sys_events(scratch.arena); struct sys_event_array events = pop_sys_events(scratch.arena);
/* Reset bind pressed / released states */ /* Reset bind pressed / released states */
for (u32 i = 0; i < ARRAY_COUNT(G.bind_states); ++i) { for (u32 i = 0; i < countof(G.bind_states); ++i) {
G.bind_states[i] = (struct bind_state) { G.bind_states[i] = (struct bind_state) {
.is_held = G.bind_states[i].is_held .is_held = G.bind_states[i].is_held
}; };
@ -1644,7 +1644,7 @@ INTERNAL void user_update(void)
struct collider_menkowski_simplex simplex = collider_res.simplex; struct collider_menkowski_simplex simplex = collider_res.simplex;
struct v2 simplex_points[] = { simplex.a.p, simplex.b.p, simplex.c.p }; struct v2 simplex_points[] = { simplex.a.p, simplex.b.p, simplex.c.p };
for (u64 i = 0; i < ARRAY_COUNT(simplex_points); ++i) simplex_points[i] = xform_mul_v2(G.world_to_user_xf, simplex_points[i]); for (u64 i = 0; i < countof(simplex_points); ++i) simplex_points[i] = xform_mul_v2(G.world_to_user_xf, simplex_points[i]);
struct v2_array simplex_array = { .count = simplex.len, .points = simplex_points }; struct v2_array simplex_array = { .count = simplex.len, .points = simplex_points };
if (simplex.len >= 1) { if (simplex.len >= 1) {
@ -1746,7 +1746,7 @@ INTERNAL void user_update(void)
struct v2 input_move_dir = ZI; struct v2 input_move_dir = ZI;
{ {
for (enum user_bind_kind bind = 0; bind < (i32)ARRAY_COUNT(G.bind_states); ++bind) { for (enum user_bind_kind bind = 0; bind < (i32)countof(G.bind_states); ++bind) {
struct bind_state state = G.bind_states[bind]; struct bind_state state = G.bind_states[bind];
if (!state.is_held && state.num_presses <= 0) { if (!state.is_held && state.num_presses <= 0) {