rename global 'L' vars to 'G'

This commit is contained in:
jacob 2024-04-11 12:50:28 -05:00
parent 02b31ab441
commit 2d593fee03
19 changed files with 666 additions and 666 deletions

View File

@ -25,7 +25,7 @@ GLOBAL struct {
struct arena arena;
struct string write_path;
struct sync_flag quit_sf;
} L = { 0 }, DEBUG_ALIAS(L, L_app);
} G = { 0 }, DEBUG_ALIAS(G, G_app);
/* ========================== *
* Write directory
@ -58,7 +58,7 @@ INTERNAL struct string initialize_write_directory(struct arena *arena, struct st
struct string app_write_path_cat(struct arena *arena, struct string filename)
{
return string_cat(arena, L.write_path, filename);
return string_cat(arena, G.write_path, filename);
}
/* ========================== *
@ -91,7 +91,7 @@ INTERNAL struct sys_window_settings default_window_settings(struct sys_window *w
void app_entry_point(void)
{
L.quit_sf = sync_flag_alloc();
G.quit_sf = sync_flag_alloc();
u32 worker_count = 4;
{
@ -110,8 +110,8 @@ void app_entry_point(void)
#endif
}
L.arena = arena_alloc(GIGABYTE(64));
L.write_path = initialize_write_directory(&L.arena, STR(WRITE_DIR));
G.arena = arena_alloc(GIGABYTE(64));
G.write_path = initialize_write_directory(&G.arena, STR(WRITE_DIR));
/* Startup logging */
{
@ -190,7 +190,7 @@ void app_entry_point(void)
sys_window_show(&window);
/* Wait for app_quit() */
sync_flag_wait(&L.quit_sf);
sync_flag_wait(&G.quit_sf);
/* Shutdown threaded systems */
/* FIXME: Only wait on threads for a certain period of time before
@ -226,5 +226,5 @@ void app_entry_point(void)
void app_quit(void)
{
sync_flag_set(&L.quit_sf);
sync_flag_set(&G.quit_sf);
}

View File

@ -29,7 +29,7 @@ GLOBAL struct {
u64 dbg_table_count;
struct sys_mutex dbg_table_mutex;
#endif
} L = { 0 }, DEBUG_ALIAS(L, L_asset_cache);
} G = { 0 }, DEBUG_ALIAS(G, G_asset_cache);
/* ========================== *
* Startup
@ -40,13 +40,13 @@ struct asset_cache_startup_receipt asset_cache_startup(struct work_startup_recei
(UNUSED)work_sr;
/* Init lookup */
L.lookup_rw_mutex = sys_rw_mutex_alloc();
G.lookup_rw_mutex = sys_rw_mutex_alloc();
/* Init store */
L.store_rw_mutex = sys_rw_mutex_alloc();
L.store_arena = arena_alloc(GIGABYTE(64));
G.store_rw_mutex = sys_rw_mutex_alloc();
G.store_arena = arena_alloc(GIGABYTE(64));
#if RTC
/* Init debug */
L.dbg_table_mutex = sys_mutex_alloc();
G.dbg_table_mutex = sys_mutex_alloc();
#endif
return (struct asset_cache_startup_receipt) { 0 };
@ -59,18 +59,18 @@ struct asset_cache_startup_receipt asset_cache_startup(struct work_startup_recei
INTERNAL void refresh_dbg_table(void)
{
#if RTC
sys_mutex_lock(&L.dbg_table_mutex);
sys_mutex_lock(&G.dbg_table_mutex);
{
MEMZERO_ARRAY(L.dbg_table);
L.dbg_table_count = 0;
for (u64 i = 0; i < ARRAY_COUNT(L.lookup); ++i) {
struct asset *asset = &L.lookup[i];
MEMZERO_ARRAY(G.dbg_table);
G.dbg_table_count = 0;
for (u64 i = 0; i < ARRAY_COUNT(G.lookup); ++i) {
struct asset *asset = &G.lookup[i];
if (asset->hash != 0) {
L.dbg_table[L.dbg_table_count++] = asset;
G.dbg_table[G.dbg_table_count++] = asset;
}
}
}
sys_mutex_unlock(&L.dbg_table_mutex);
sys_mutex_unlock(&G.dbg_table_mutex);
#endif
}
@ -78,9 +78,9 @@ INTERNAL void refresh_dbg_table(void)
* Check returned slot->hash != 0 for presence. */
INTERNAL struct asset *asset_cache_get_slot_assume_locked(struct string key, u64 hash)
{
u64 index = hash % ARRAY_COUNT(L.lookup);
u64 index = hash % ARRAY_COUNT(G.lookup);
while (true) {
struct asset *slot = &L.lookup[index];
struct asset *slot = &G.lookup[index];
if (slot->hash) {
/* Occupied */
if (hash == slot->hash && string_eq(key, slot->key)) {
@ -88,7 +88,7 @@ INTERNAL struct asset *asset_cache_get_slot_assume_locked(struct string key, u64
return slot;
} else {
++index;
if (index >= ARRAY_COUNT(L.lookup)) {
if (index >= ARRAY_COUNT(G.lookup)) {
index = 0;
}
}
@ -122,20 +122,20 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch
/* Lookup */
{
sys_rw_mutex_lock_shared(&L.lookup_rw_mutex);
sys_rw_mutex_lock_shared(&G.lookup_rw_mutex);
asset = asset_cache_get_slot_assume_locked(key, hash);
sys_rw_mutex_unlock_shared(&L.lookup_rw_mutex);
sys_rw_mutex_unlock_shared(&G.lookup_rw_mutex);
}
/* Insert if not found */
if (!asset->hash) {
sys_rw_mutex_lock_exclusive(&L.lookup_rw_mutex);
sys_rw_mutex_lock_exclusive(&G.lookup_rw_mutex);
/* Re-check asset presence in case it was inserted since lock */
asset = asset_cache_get_slot_assume_locked(key, hash);
if (!asset->hash) {
if (L.num_assets >= MAX_ASSETS) {
if (G.num_assets >= MAX_ASSETS) {
sys_panic(STR("Max assets reached"));
}
struct string key_stored = { 0 };
@ -157,12 +157,12 @@ struct asset *asset_cache_touch(struct string key, u64 hash, b32 *is_first_touch
if (is_first_touch) {
*is_first_touch = true;
}
++L.num_assets;
++G.num_assets;
refresh_dbg_table();
}
sys_rw_mutex_unlock_exclusive(&L.lookup_rw_mutex);
sys_rw_mutex_unlock_exclusive(&G.lookup_rw_mutex);
}
return asset;
@ -232,8 +232,8 @@ void *asset_cache_get_store_data(struct asset *asset)
struct asset_cache_store asset_cache_store_open(void)
{
struct asset_cache_store store = {
.rw_mutex = &L.store_rw_mutex,
.arena = &L.store_arena
.rw_mutex = &G.store_rw_mutex,
.arena = &G.store_arena
};
sys_rw_mutex_lock_exclusive(store.rw_mutex);
return store;

View File

@ -270,7 +270,7 @@ typedef u64 umm;
#define U8_MAX (0xFF)
#define U16_MAX (0xFFFF)
#define U32_MAX (0xFFFFFFFF)
#define U64_MAX (0xFFFFFFFFFFFFFFFFULL)
#define U64_MAX (0xFFFFFFFFFFFFFFFFULL)
#define I8_MAX (0x7F)
#define I16_MAX (0x7FFF)

View File

@ -6,7 +6,7 @@
GLOBAL struct {
struct renderer_handle solid_white;
} L = { 0 }, DEBUG_ALIAS(L, L_draw);
} G = { 0 }, DEBUG_ALIAS(G, G_draw);
/* ========================== *
* Startup
@ -27,7 +27,7 @@ struct draw_startup_receipt draw_startup(struct renderer_startup_receipt *render
.pixels = arena_push(scratch.arena, u32)
};
image_data.pixels[0] = COLOR_WHITE;
L.solid_white = renderer_texture_alloc(image_data);
G.solid_white = renderer_texture_alloc(image_data);
scratch_end(scratch);
}
@ -130,7 +130,7 @@ void draw_solid_poly(struct renderer_canvas *canvas, struct v2_array array, u32
return;
}
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = L.solid_white });
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = G.solid_white });
draw_solid_poly_internal(canvas, array, color);
}
@ -158,13 +158,13 @@ void draw_solid_circle(struct renderer_canvas *canvas, struct v2 pos, f32 radius
void draw_solid_quad(struct renderer_canvas *canvas, struct quad quad, u32 color)
{
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = L.solid_white });
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = G.solid_white });
draw_texture_quad_internal(canvas, CLIP_ALL, color, quad);
}
void draw_solid_rect(struct renderer_canvas *canvas, struct rect rect, u32 color)
{
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = L.solid_white });
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = G.solid_white });
struct quad quad = quad_from_rect(rect);
draw_texture_quad_internal(canvas, CLIP_ALL, color, quad);
}
@ -175,14 +175,14 @@ void draw_solid_rect(struct renderer_canvas *canvas, struct rect rect, u32 color
void draw_solid_line(struct renderer_canvas *canvas, struct v2 start, struct v2 end, f32 thickness, u32 color)
{
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = L.solid_white });
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = G.solid_white });
struct quad quad = quad_from_line(start, end, thickness);
draw_texture_quad_internal(canvas, CLIP_ALL, color, quad);
}
void draw_solid_ray(struct renderer_canvas *canvas, struct v2 pos, struct v2 rel, f32 thickness, u32 color)
{
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = L.solid_white });
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = G.solid_white });
struct quad quad = quad_from_ray(pos, rel, thickness);
draw_texture_quad_internal(canvas, CLIP_ALL, color, quad);
}
@ -193,7 +193,7 @@ void draw_solid_poly_line(struct renderer_canvas *canvas, struct v2_array array,
return;
}
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = L.solid_white });
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = G.solid_white });
for (u64 i = 1; i < array.count; ++i) {
struct v2 p1 = array.points[i - 1];
struct v2 p2 = array.points[i];
@ -223,7 +223,7 @@ void draw_solid_rect_line(struct renderer_canvas *canvas, struct rect rect, f32
void draw_solid_arrow_line(struct renderer_canvas *canvas, struct v2 start, struct v2 end, f32 thickness, f32 arrowhead_height, u32 color)
{
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = L.solid_white });
renderer_canvas_ensure_texture_cmd(canvas, (struct texture_shader_parameters) { .texture = G.solid_white });
const f32 head_width_ratio = 0.5f; /* Width of arrowhead relative to its length */

View File

@ -46,7 +46,7 @@ struct font_task_params_store {
GLOBAL struct {
struct font_task_params_store params;
} L = { 0 }, DEBUG_ALIAS(L, L_font);
} G = { 0 }, DEBUG_ALIAS(G, G_font);
/* ========================== *
* Startup
@ -64,8 +64,8 @@ struct font_startup_receipt font_startup(struct work_startup_receipt *work_sr,
(UNUSED)ttf_sr;
(UNUSED)resource_sr;
L.params.arena = arena_alloc(GIGABYTE(64));
L.params.mutex = sys_mutex_alloc();
G.params.arena = arena_alloc(GIGABYTE(64));
G.params.mutex = sys_mutex_alloc();
#if FONT_TEST
for (u64 i = 0; i < ARRAY_COUNT(g_font_codes); ++i) {
@ -83,27 +83,27 @@ struct font_startup_receipt font_startup(struct work_startup_receipt *work_sr,
INTERNAL struct font_task_params *font_task_params_alloc(void)
{
struct font_task_params *p = NULL;
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct font_task_params);
p = arena_push_zero(&G.params.arena, struct font_task_params);
}
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
return p;
}
INTERNAL void font_task_params_release(struct font_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
p->next_free = L.params.head_free;
L.params.head_free = p;
p->next_free = G.params.head_free;
G.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
}
/* ========================== *
@ -306,7 +306,7 @@ struct font_task_params_store {
GLOBAL struct {
struct font_task_params_store params;
} L = { 0 }, DEBUG_ALIAS(L, L_font);
} G = { 0 }, DEBUG_ALIAS(G, G_font);
/* ========================== *
* Startup
@ -324,8 +324,8 @@ struct font_startup_receipt font_startup(struct work_startup_receipt *work_sr,
(UNUSED)ttf_sr;
(UNUSED)resource_sr;
L.params.arena = arena_alloc(GIGABYTE(64));
L.params.mutex = sys_mutex_alloc();
G.params.arena = arena_alloc(GIGABYTE(64));
G.params.mutex = sys_mutex_alloc();
#if FONT_TEST
for (u64 i = 0; i < ARRAY_COUNT(g_font_codes); ++i) {
@ -343,27 +343,27 @@ struct font_startup_receipt font_startup(struct work_startup_receipt *work_sr,
INTERNAL struct font_task_params *font_task_params_alloc(void)
{
struct font_task_params *p = NULL;
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct font_task_params);
p = arena_push_zero(&G.params.arena, struct font_task_params);
}
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
return p;
}
INTERNAL void font_task_params_release(struct font_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
p->next_free = L.params.head_free;
L.params.head_free = p;
p->next_free = G.params.head_free;
G.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
}
/* ========================== *

View File

@ -23,7 +23,7 @@ GLOBAL struct {
struct sys_mutex published_tick_mutex;
struct world published_tick;
struct atomic_u64 published_tick_id;
} L = { 0 }, DEBUG_ALIAS(L, L_game);
} G = { 0 }, DEBUG_ALIAS(G, G_game);
/* ========================== *
* Game cmd
@ -31,28 +31,28 @@ GLOBAL struct {
INTERNAL void push_cmds(struct game_cmd_array cmd_array)
{
sys_mutex_lock(&L.game_cmds_mutex);
sys_mutex_lock(&G.game_cmds_mutex);
{
struct game_cmd *cmds = arena_push_array(&L.game_cmds_arena, struct game_cmd, cmd_array.count);
struct game_cmd *cmds = arena_push_array(&G.game_cmds_arena, struct game_cmd, cmd_array.count);
MEMCPY(cmds, cmd_array.cmds, cmd_array.count * sizeof(*cmds));
}
sys_mutex_unlock(&L.game_cmds_mutex);
sys_mutex_unlock(&G.game_cmds_mutex);
}
INTERNAL struct game_cmd_array pop_cmds(struct arena *arena)
{
struct game_cmd_array array = { 0 };
if (L.game_cmds_arena.pos > 0) {
sys_mutex_lock(&L.game_cmds_mutex);
if (G.game_cmds_arena.pos > 0) {
sys_mutex_lock(&G.game_cmds_mutex);
{
struct buffer game_cmds_buff = arena_to_buffer(&L.game_cmds_arena);
struct buffer game_cmds_buff = arena_to_buffer(&G.game_cmds_arena);
arena_align(arena, alignof(struct game_cmd));
array.cmds = (struct game_cmd *)arena_push_array(arena, u8, game_cmds_buff.size);
array.count = game_cmds_buff.size / sizeof(struct game_cmd);
MEMCPY(array.cmds, game_cmds_buff.data, game_cmds_buff.size);
arena_reset(&L.game_cmds_arena);
arena_reset(&G.game_cmds_arena);
}
sys_mutex_unlock(&L.game_cmds_mutex);
sys_mutex_unlock(&G.game_cmds_mutex);
}
return array;
}
@ -64,12 +64,12 @@ INTERNAL struct game_cmd_array pop_cmds(struct arena *arena)
INTERNAL void publish_game_tick(void)
{
__prof;
sys_mutex_lock(&L.published_tick_mutex);
sys_mutex_lock(&G.published_tick_mutex);
{
world_copy_replace(&L.published_tick, &L.world);
atomic_u64_eval_exchange(&L.published_tick_id, L.published_tick.tick_id);
world_copy_replace(&G.published_tick, &G.world);
atomic_u64_eval_exchange(&G.published_tick_id, G.published_tick.tick_id);
}
sys_mutex_unlock(&L.published_tick_mutex);
sys_mutex_unlock(&G.published_tick_mutex);
}
INTERNAL void recalculate_world_xform_recurse(struct entity *parent)
@ -96,14 +96,14 @@ INTERNAL void recalculate_world_xform_recurse(struct entity *parent)
child->world_xform = world_xform;
/* Append sub-children to stack */
struct entity *subchild = entity_from_handle(&L.world.entity_store, child->last);
struct entity *subchild = entity_from_handle(&G.world.entity_store, child->last);
while (subchild->valid) {
*arena_push(scratch.arena, struct stack_node) = (struct stack_node) {
.entity = subchild,
.parent_xform = world_xform
};
++stack_count;
subchild = entity_from_handle(&L.world.entity_store, subchild->prev);
subchild = entity_from_handle(&G.world.entity_store, subchild->prev);
}
}
@ -129,7 +129,7 @@ INTERNAL void game_update(void)
struct v2 size = V2(1, 1);
f32 r = 0;
struct entity *e = entity_alloc(&L.world.entity_store);
struct entity *e = entity_alloc(&G.world.entity_store);
e->valid = true;
e->rel_xform = XFORM_TRS(.t = pos, .r = r, .s = size);
@ -180,7 +180,7 @@ INTERNAL void game_update(void)
struct v2 size = V2(1, 1);
f32 r = 0;
struct entity *e = entity_alloc(&L.world.entity_store);
struct entity *e = entity_alloc(&G.world.entity_store);
e->valid = true;
e->rel_xform = XFORM_TRS(.t = pos, .r = r, .s = size);
@ -219,11 +219,11 @@ INTERNAL void game_update(void)
//entity_enable_prop(e, ENTITY_PROP_TEST);
entity_link(&L.world.entity_store, parent, e);
entity_link(&G.world.entity_store, parent, e);
if (sys_rand_u32() % 2 == 0) {
u64 parent_idx = sys_rand_u32() % entity_store_as_array(&L.world.entity_store).count;
struct entity *rand_ent = entity_from_handle(&L.world.entity_store, (struct entity_handle) { .idx = parent_idx, .gen = 1 });
u64 parent_idx = sys_rand_u32() % entity_store_as_array(&G.world.entity_store).count;
struct entity *rand_ent = entity_from_handle(&G.world.entity_store, (struct entity_handle) { .idx = parent_idx, .gen = 1 });
if (rand_ent->valid) {
parent = rand_ent;
} else {
@ -234,7 +234,7 @@ INTERNAL void game_update(void)
/* Camera ent */
{
struct entity *e = entity_alloc(&L.world.entity_store);
struct entity *e = entity_alloc(&G.world.entity_store);
e->valid = true;
e->rel_xform = XFORM_IDENT;
@ -250,7 +250,7 @@ INTERNAL void game_update(void)
/* Sound ent */
{
struct entity *e = entity_alloc(&L.world.entity_store);
struct entity *e = entity_alloc(&G.world.entity_store);
e->valid = true;
e->rel_xform = XFORM_POS(V2(-3, -3));
@ -273,11 +273,11 @@ INTERNAL void game_update(void)
}
}
++L.world.tick_id;
L.world.tick_ts = sys_timestamp();
L.world.dt = max_f64(0.0, (1.0 / GAME_FPS) * L.world.timescale);
L.world.time += L.world.dt;
struct entity_array entities_array = entity_store_as_array(&L.world.entity_store);
++G.world.tick_id;
G.world.tick_ts = sys_timestamp();
G.world.dt = max_f64(0.0, (1.0 / GAME_FPS) * G.world.timescale);
G.world.time += G.world.dt;
struct entity_array entities_array = entity_store_as_array(&G.world.entity_store);
/* ========================== *
* Process game cmds
@ -290,8 +290,8 @@ INTERNAL void game_update(void)
switch (cmd.kind) {
/* Movement */
case GAME_CMD_KIND_PLAYER_MOVE: {
L.world.player_move_dir = cmd.move_dir;
L.world.player_aim = cmd.aim;
G.world.player_move_dir = cmd.move_dir;
G.world.player_aim = cmd.aim;
} break;
/* Clear level */
@ -299,7 +299,7 @@ INTERNAL void game_update(void)
for (u64 i = 0; i < entities_array.count; ++i) {
struct entity *ent = &entities_array.entities[i];
if (ent->valid) {
entity_release(&L.world.entity_store, ent);
entity_release(&G.world.entity_store, ent);
}
}
} break;
@ -308,9 +308,9 @@ INTERNAL void game_update(void)
};
}
if (v2_len(L.world.player_move_dir) > 1.f) {
if (v2_len(G.world.player_move_dir) > 1.f) {
/* Clamp movement magnitude */
L.world.player_move_dir = v2_norm(L.world.player_move_dir);
G.world.player_move_dir = v2_norm(G.world.player_move_dir);
}
/* ---------------------------------------------------------------------- */
@ -340,7 +340,7 @@ INTERNAL void game_update(void)
* ========================== */
if (entity_has_prop(ent, ENTITY_PROP_ANIMATING)) {
f64 time_in_frame = ent->animation_time_in_frame + L.world.dt;
f64 time_in_frame = ent->animation_time_in_frame + G.world.dt;
u64 tag_frame_offset = ent->animation_frame;
struct sheet *sheet = sheet_load(ent->sprite_name);
@ -371,7 +371,7 @@ INTERNAL void game_update(void)
/* ENTITY_PROP_TEST */
if (entity_has_prop(ent, ENTITY_PROP_TEST)) {
f32 t = ((f32)L.world.time);
f32 t = ((f32)G.world.time);
struct v2 og = v2_mul(V2(math_cos(t), math_sin(t)), 3);
f32 r = t + PI / 2;
struct v2 s = V2(1 + (math_fabs(math_sin(t * 5)) * 3), 1);
@ -407,7 +407,7 @@ INTERNAL void game_update(void)
f32 max_speed = ent->player_max_speed;
f32 acceleration_rate = ent->player_acceleration;
acceleration_rate = clamp_f32(acceleration_rate, 0, GAME_FPS); /* Can't integrate acceleration rate higher than FPS */
struct v2 target_velocity = v2_mul(L.world.player_move_dir, max_speed);
struct v2 target_velocity = v2_mul(G.world.player_move_dir, max_speed);
struct v2 target_acceleration = v2_sub(target_velocity, ent->velocity);
ent->acceleration = v2_mul(target_acceleration, acceleration_rate);
}
@ -417,7 +417,7 @@ INTERNAL void game_update(void)
* ========================== */
{
f32 dt = (f32)L.world.dt;
f32 dt = (f32)G.world.dt;
/* Apply acceleration to velocity */
struct v2 a = v2_mul(ent->acceleration, dt);
@ -433,7 +433,7 @@ INTERNAL void game_update(void)
if (entity_has_prop(ent, ENTITY_PROP_PLAYER_CONTROLLED)) {
/* Update aim */
ent->player_aim = L.world.player_aim;
ent->player_aim = G.world.player_aim;
/* Update view angle */
struct v2 ent_pos = ent->rel_xform.og;
@ -463,7 +463,7 @@ INTERNAL void game_update(void)
/* Camera follow */
if (entity_has_prop(ent, ENTITY_PROP_CAMERA)) {
struct entity *follow = entity_from_handle(&L.world.entity_store, ent->camera_follow);
struct entity *follow = entity_from_handle(&G.world.entity_store, ent->camera_follow);
if (entity_has_prop(follow, ENTITY_PROP_PLAYER_CONTROLLED)) {
#if 0
@ -496,7 +496,7 @@ INTERNAL void game_update(void)
/* Lerp camera */
if (ent->camera_applied_lerp_continuity_gen_plus_one == ent->camera_lerp_continuity_gen + 1) {
f32 t = 1 - math_pow(2.f, -20.f * (f32)L.world.dt);
f32 t = 1 - math_pow(2.f, -20.f * (f32)G.world.dt);
ent->rel_xform = xform_lerp(ent->rel_xform, ent->camera_rel_xform_target, t);
} else {
/* Skip lerp */
@ -514,7 +514,7 @@ INTERNAL void game_update(void)
if (entity_has_prop(ent, ENTITY_PROP_TEST_SOUND_EMITTER)) {
struct mixer_desc desc = ent->sound_desc;
desc.speed = L.world.timescale;
desc.speed = G.world.timescale;
desc.pos = ent->world_xform.og;
struct sound *sound = sound_load_async(ent->sound_name, 0);
@ -548,7 +548,7 @@ INTERNAL SYS_THREAD_FUNC_DEF(game_thread_entry_point, arg)
(UNUSED)arg;
sys_timestamp_t last_frame_ts = 0;
f64 target_dt = GAME_FPS > (0) ? (1.0 / GAME_FPS) : 0;
while (!L.shutdown) {
while (!G.shutdown) {
__profscope(game_update_w_sleep);
sleep_frame(last_frame_ts, target_dt);
last_frame_ts = sys_timestamp();
@ -565,27 +565,27 @@ struct game_startup_receipt game_startup(struct mixer_startup_receipt *mixer_sr,
(UNUSED)sound_sr;
/* Initialize game cmd storage */
L.game_cmds_mutex = sys_mutex_alloc();
L.game_cmds_arena = arena_alloc(GIGABYTE(64));
G.game_cmds_mutex = sys_mutex_alloc();
G.game_cmds_arena = arena_alloc(GIGABYTE(64));
/* Initialize world */
world_alloc(&L.world);
world_alloc(&G.world);
/* Initialize tick transmission */
world_alloc(&L.published_tick);
L.published_tick_mutex = sys_mutex_alloc();
world_alloc(&G.published_tick);
G.published_tick_mutex = sys_mutex_alloc();
L.world.timescale = GAME_TIMESCALE;
L.game_thread = sys_thread_init(&game_thread_entry_point, NULL, STR("[P2] Game thread"));
G.world.timescale = GAME_TIMESCALE;
G.game_thread = sys_thread_init(&game_thread_entry_point, NULL, STR("[P2] Game thread"));
return (struct game_startup_receipt) { 0 };
}
void game_shutdown(void)
{
L.shutdown = true;
sys_thread_join(&L.game_thread);
G.shutdown = true;
sys_thread_join(&G.game_thread);
}
/* ========================== *
@ -594,16 +594,16 @@ void game_shutdown(void)
void game_get_latest_tick(struct world *dest)
{
sys_mutex_lock(&L.published_tick_mutex);
sys_mutex_lock(&G.published_tick_mutex);
{
world_copy_replace(dest, &L.published_tick);
world_copy_replace(dest, &G.published_tick);
}
sys_mutex_unlock(&L.published_tick_mutex);
sys_mutex_unlock(&G.published_tick_mutex);
}
u64 game_get_latest_tick_id(void)
{
return atomic_u64_eval(&L.published_tick_id);
return atomic_u64_eval(&G.published_tick_id);
}
void game_push_cmds(struct game_cmd_array cmd_array)

View File

@ -20,7 +20,7 @@ GLOBAL struct {
log_event_callback_func *callbacks_head;
struct sys_file file;
b32 file_valid;
} L = { 0 }, DEBUG_ALIAS(L, L_log);
} G = { 0 }, DEBUG_ALIAS(G, G_log);
GLOBAL READONLY const struct log_level_settings g_log_level_settings[LOG_LEVEL_COUNT] = {
[LOG_LEVEL_CRITICAL] = {
@ -55,18 +55,18 @@ GLOBAL READONLY const struct log_level_settings g_log_level_settings[LOG_LEVEL_C
struct log_startup_receipt log_startup(struct string logfile_path)
{
L.mutex = sys_mutex_alloc();
L.arena = arena_alloc(GIGABYTE(64));
G.mutex = sys_mutex_alloc();
G.arena = arena_alloc(GIGABYTE(64));
if (logfile_path.len > 0) {
/* Create / wipe log file */
sys_file_close(sys_file_open_write(logfile_path));
/* Keep log file open for appending */
if (sys_is_file(logfile_path)) {
L.file = sys_file_open_append(logfile_path);
L.file_valid = true;
G.file = sys_file_open_append(logfile_path);
G.file_valid = true;
}
}
atomic_i32_eval_exchange(&L.initialized, 1);
atomic_i32_eval_exchange(&G.initialized, 1);
return (struct log_startup_receipt) { 0 };
}
@ -76,13 +76,13 @@ struct log_startup_receipt log_startup(struct string logfile_path)
void log_register_callback(log_event_callback_func *func)
{
if (!atomic_i32_eval(&L.initialized)) { return; }
sys_mutex_lock(&L.mutex);
if (!atomic_i32_eval(&G.initialized)) { return; }
sys_mutex_lock(&G.mutex);
{
/* TODO */
(UNUSED)func;
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}
/* ========================== *
@ -92,12 +92,12 @@ void log_register_callback(log_event_callback_func *func)
INTERNAL void append_to_logfile(struct string msg)
{
__prof;
if (!atomic_i32_eval(&L.initialized)) { return; }
if (!atomic_i32_eval(&G.initialized)) { return; }
if (L.file_valid) {
if (G.file_valid) {
struct temp_arena scratch = scratch_begin_no_conflict();
struct string msg_line = string_cat(scratch.arena, msg, STR("\n"));
sys_file_write(L.file, BUFFER_FROM_STRING(msg_line));
sys_file_write(G.file, BUFFER_FROM_STRING(msg_line));
scratch_end(scratch);
}
}
@ -109,7 +109,7 @@ void _log(i32 level, struct string msg)
#endif
{
__prof;
if (!atomic_i32_eval(&L.initialized)) { return; }
if (!atomic_i32_eval(&G.initialized)) { return; }
if (level < 0 || level >= LOG_LEVEL_COUNT) {
sys_panic_raw("Invalid log level");
@ -179,7 +179,7 @@ void _logfv(i32 level, struct string file, u32 line, struct string fmt, va_list
void _logfv(i32 level, struct string fmt, va_list args)
#endif
{
if (!atomic_i32_eval(&L.initialized)) { return; }
if (!atomic_i32_eval(&G.initialized)) { return; }
struct temp_arena scratch = scratch_begin_no_conflict();
struct string msg = string_formatv(scratch.arena, fmt, args);
#if LOG_INCLUDE_SOURCE_LOCATION
@ -196,7 +196,7 @@ void _logf(i32 level, struct string file, u32 line, struct string fmt, ...)
void _logf(i32 level, struct string fmt, ...)
#endif
{
if (!atomic_i32_eval(&L.initialized)) { return; }
if (!atomic_i32_eval(&G.initialized)) { return; }
va_list args;
va_start(args, fmt);
#if LOG_INCLUDE_SOURCE_LOCATION

View File

@ -64,7 +64,7 @@ GLOBAL struct {
struct track *track_last_playing;
u64 track_playing_count;
struct track *track_first_free;
} L = { 0 }, DEBUG_ALIAS(L, L_mixer);
} G = { 0 }, DEBUG_ALIAS(G, G_mixer);
/* ========================== *
* Startup
@ -72,10 +72,10 @@ GLOBAL struct {
struct mixer_startup_receipt mixer_startup(void)
{
L.track_arena = arena_alloc(GIGABYTE(64));
L.mutex = sys_mutex_alloc();
L.listener_pos = V2(0, 0);
L.listener_dir = V2(0, -1);
G.track_arena = arena_alloc(GIGABYTE(64));
G.mutex = sys_mutex_alloc();
G.listener_pos = V2(0, 0);
G.listener_dir = V2(0, -1);
return (struct mixer_startup_receipt) { 0 };
}
@ -104,21 +104,21 @@ INTERNAL struct track *track_from_handle(struct mixer_track_handle handle)
INTERNAL struct track *track_alloc_assume_locked(struct sound *sound)
{
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
struct track *track = NULL;
if (L.track_first_free) {
if (G.track_first_free) {
/* Take from free list */
track = L.track_first_free;
track = G.track_first_free;
struct track *next_free = track->next;
L.track_first_free = next_free;
G.track_first_free = next_free;
if (next_free) {
next_free->prev = NULL;
}
*track = (struct track) { 0 };
} else {
/* Allocate new */
track = arena_push_zero(&L.track_arena, struct track);
track = arena_push_zero(&G.track_arena, struct track);
track->gen = 1;
}
@ -127,22 +127,22 @@ INTERNAL struct track *track_alloc_assume_locked(struct sound *sound)
track->mix.track_handle = track_to_handle(track);
/* Append to playing list */
struct track *prev = L.track_last_playing;
struct track *prev = G.track_last_playing;
if (prev) {
prev->next = track;
} else {
L.track_first_playing = track;
G.track_first_playing = track;
}
L.track_last_playing = track;
G.track_last_playing = track;
track->prev = prev;
++L.track_playing_count;
++G.track_playing_count;
return track;
}
INTERNAL void track_release_assume_locked(struct track *track)
{
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
/* Remove from playing list */
struct track *prev = track->prev;
@ -151,24 +151,24 @@ INTERNAL void track_release_assume_locked(struct track *track)
prev->next = next;
} else {
/* Track was first in list */
L.track_first_playing = next;
G.track_first_playing = next;
}
if (next) {
next->prev = prev;
} else {
/* Track was last in list */
L.track_last_playing = prev;
G.track_last_playing = prev;
}
--L.track_playing_count;
--G.track_playing_count;
++track->gen;
/* Add to free list */
track->prev = NULL;
track->next = L.track_first_free;
if (L.track_first_free) {
L.track_first_free->prev = track;
track->next = G.track_first_free;
if (G.track_first_free) {
G.track_first_free->prev = track;
}
L.track_first_free = track;
G.track_first_free = track;
}
/* ========================== *
@ -187,12 +187,12 @@ struct mixer_track_handle mixer_play_ex(struct sound *sound, struct mixer_desc d
{
struct track *track;
{
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
track = track_alloc_assume_locked(sound);
track->desc = desc;
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}
return track_to_handle(track);
}
@ -205,7 +205,7 @@ struct mixer_desc mixer_track_get(struct mixer_track_handle handle)
struct track *track = track_from_handle(handle);
if (track) {
/* TODO: Only lock mutex on track itself or something */
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
@ -213,7 +213,7 @@ struct mixer_desc mixer_track_get(struct mixer_track_handle handle)
res = track->desc;
}
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}
return res;
@ -225,7 +225,7 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc)
struct track *track = track_from_handle(handle);
if (track) {
/* TODO: Only lock mutex on track itself or something */
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
/* Confirm handle is still valid now that we're locked */
track = track_from_handle(handle);
@ -233,18 +233,18 @@ void mixer_track_set(struct mixer_track_handle handle, struct mixer_desc desc)
track->desc = desc;
}
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}
}
void mixer_set_listener(struct v2 pos, struct v2 dir)
{
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
L.listener_pos = pos;
L.listener_dir = v2_norm(dir);
G.listener_pos = pos;
G.listener_dir = v2_norm(dir);
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}
/* ========================== *
@ -279,22 +279,22 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
/* Create temp array of mixes */
struct mix **mixes = NULL;
u64 mixes_count = 0;
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
/* Read listener info */
listener_pos = L.listener_pos;
listener_dir = L.listener_dir;
listener_pos = G.listener_pos;
listener_dir = G.listener_dir;
/* Update & read mixes */
mixes = arena_push_array(scratch.arena, struct mix *, L.track_playing_count);
for (struct track *track = L.track_first_playing; track; track = track->next) {
mixes = arena_push_array(scratch.arena, struct mix *, G.track_playing_count);
for (struct track *track = G.track_first_playing; track; track = track->next) {
__profscope(prepare_track);
struct mix *mix = &track->mix;
mix->desc = track->desc;
mixes[mixes_count++] = mix;
}
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
for (u64 mix_index = 0; mix_index < mixes_count; ++mix_index) {
__profscope(mix_track);
@ -463,7 +463,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
}
}
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
__profscope(update_track_effect_data);
for (u64 i = 0; i < mixes_count; ++i) {
@ -477,7 +477,7 @@ struct mixed_pcm_f32 mixer_update(struct arena *arena, u64 frame_count)
}
}
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
scratch_end(scratch);
return res;

View File

@ -42,7 +42,7 @@ GLOBAL struct {
WAVEFORMATEX *buffer_format;
u32 buffer_frames;
HANDLE mmtc_handle;
} L = { 0 }, DEBUG_ALIAS(L, L_playback_wasapi);
} G = { 0 }, DEBUG_ALIAS(G, G_playback_wasapi);
/* ========================== *
* Initialize
@ -52,7 +52,7 @@ INTERNAL void wasapi_initialize(void)
{
/* https://learn.microsoft.com/en-us/windows/win32/procthread/multimedia-class-scheduler-service#registry-settings */
DWORD task = 0;
L.mmtc_handle = AvSetMmThreadCharacteristicsW(L"Pro Audio", &task);
G.mmtc_handle = AvSetMmThreadCharacteristicsW(L"Pro Audio", &task);
u64 sample_rate = PLAYBACK_SAMPLE_RATE;
u64 channel_count = 2;
@ -68,7 +68,7 @@ INTERNAL void wasapi_initialize(void)
IMMDeviceEnumerator_Release(enumerator);
/* Create audio client for device */
IMMDevice_Activate(device, &IID_IAudioClient, CLSCTX_ALL, NULL, (LPVOID *)&L.client);
IMMDevice_Activate(device, &IID_IAudioClient, CLSCTX_ALL, NULL, (LPVOID *)&G.client);
IMMDevice_Release(device);
WAVEFORMATEXTENSIBLE format_ex = {
@ -91,7 +91,7 @@ INTERNAL void wasapi_initialize(void)
#if 1
b32 client_initialized = FALSE;
IAudioClient3 *client3;
if (SUCCEEDED(IAudioClient_QueryInterface(L.client, &IID_IAudioClient3, (LPVOID *)&client3))) {
if (SUCCEEDED(IAudioClient_QueryInterface(G.client, &IID_IAudioClient3, (LPVOID *)&client3))) {
/* From Martins: Minimum buffer size will typically be 480 samples (10msec @ 48khz)
* but it can be 128 samples (2.66 msec @ 48khz) if driver is properly installed
* see bullet-point instructions here: https://learn.microsoft.com/en-us/windows-hardware/drivers/audio/low-latency-audio#measurement-tools
@ -113,7 +113,7 @@ INTERNAL void wasapi_initialize(void)
if (!client_initialized) {
/* Get duration for shared-mode streams, this will typically be 480 samples (10msec @ 48khz) */
REFERENCE_TIME duration;
IAudioClient_GetDevicePeriod(L.client, &duration, NULL);
IAudioClient_GetDevicePeriod(G.client, &duration, NULL);
/* Initialize audio playback
*
@ -123,23 +123,23 @@ INTERNAL void wasapi_initialize(void)
* but allows for any input format.
*/
const DWORD flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
IAudioClient_Initialize(L.client, AUDCLNT_SHAREMODE_SHARED, flags, duration, 0, wfx, NULL);
IAudioClient_Initialize(G.client, AUDCLNT_SHAREMODE_SHARED, flags, duration, 0, wfx, NULL);
}
IAudioClient_GetMixFormat(L.client, &L.buffer_format);
IAudioClient_GetMixFormat(G.client, &G.buffer_format);
/* Set up event handler to wait on */
L.event = CreateEventW(NULL, FALSE, FALSE, NULL);
IAudioClient_SetEventHandle(L.client, L.event);
G.event = CreateEventW(NULL, FALSE, FALSE, NULL);
IAudioClient_SetEventHandle(G.client, G.event);
/* Get playback client */
IAudioClient_GetService(L.client, &IID_IAudioRenderClient, (LPVOID *)&L.playback);
IAudioClient_GetService(G.client, &IID_IAudioRenderClient, (LPVOID *)&G.playback);
/* Start the playback */
IAudioClient_Start(L.client);
IAudioClient_Start(G.client);
/* Get audio buffer size in samples */
IAudioClient_GetBufferSize(L.client, &L.buffer_frames);
IAudioClient_GetBufferSize(G.client, &G.buffer_frames);
}
/* ========================== *
@ -154,19 +154,19 @@ INTERNAL struct wasapi_buffer wasapi_update_begin(void)
/* Wait */
{
__profscope(wasapi_wait_on_event);
WaitForSingleObject(L.event, INFINITE);
WaitForSingleObject(G.event, INFINITE);
}
/* Get padding frames */
u32 padding_frames;
IAudioClient_GetCurrentPadding(L.client, &padding_frames);
IAudioClient_GetCurrentPadding(G.client, &padding_frames);
/* Get output buffer from WASAPI */
wspbuf.frames_count = 0;
if (padding_frames <= L.buffer_frames) {
wspbuf.frames_count = L.buffer_frames - padding_frames;
if (padding_frames <= G.buffer_frames) {
wspbuf.frames_count = G.buffer_frames - padding_frames;
}
IAudioRenderClient_GetBuffer(L.playback, wspbuf.frames_count, &wspbuf.frames);
IAudioRenderClient_GetBuffer(G.playback, wspbuf.frames_count, &wspbuf.frames);
return wspbuf;
}
@ -180,7 +180,7 @@ INTERNAL void wasapi_update_end(struct wasapi_buffer *wspbuf, struct mixed_pcm_f
u32 flags = 0;
if (frames_in_source == frames_in_output) {
/* Copy bytes to output */
u32 bytes_per_sample = L.buffer_format->nBlockAlign / L.buffer_format->nChannels;
u32 bytes_per_sample = G.buffer_format->nBlockAlign / G.buffer_format->nChannels;
u32 write_size = frames_in_source * 2 * bytes_per_sample;
MEMCPY(wspbuf->frames, src.samples, write_size);
} else {
@ -197,7 +197,7 @@ INTERNAL void wasapi_update_end(struct wasapi_buffer *wspbuf, struct mixed_pcm_f
#endif
/* Submit output buffer to WASAPI */
IAudioRenderClient_ReleaseBuffer(L.playback, frames_in_source, flags);
IAudioRenderClient_ReleaseBuffer(G.playback, frames_in_source, flags);
__profframe("Audio");
}
@ -211,7 +211,7 @@ INTERNAL SYS_THREAD_FUNC_DEF(playback_thread_entry_point, arg)
/* FIXME: If playback fails at any point and mixer stops advancing, we
* need to halt mixer to prevent memory leak when sounds are played. */
while (!L.shutdown) {
while (!G.shutdown) {
struct temp_arena scratch = scratch_begin_no_conflict();
struct wasapi_buffer wspbuf = wasapi_update_begin();
@ -227,13 +227,13 @@ struct playback_startup_receipt playback_startup(struct mixer_startup_receipt *m
(UNUSED)mixer_sr;
wasapi_initialize();
L.playback_thread = sys_thread_init(&playback_thread_entry_point, NULL, STR("[P3] Audio thread"));
G.playback_thread = sys_thread_init(&playback_thread_entry_point, NULL, STR("[P3] Audio thread"));
return (struct playback_startup_receipt) { 0 };
}
void playback_shutdown(void)
{
L.shutdown = true;
sys_thread_join(&L.playback_thread);
G.shutdown = true;
sys_thread_join(&G.playback_thread);
}

View File

@ -143,7 +143,7 @@ GLOBAL struct {
struct dx11_shader_desc shader_info[NUM_SHADERS];
} L = { 0 }, DEBUG_ALIAS(L, L_renderer_d3d11);
} G = { 0 }, DEBUG_ALIAS(G, G_renderer_d3d11);
/* ========================== *
* Util
@ -160,13 +160,13 @@ INLINE struct mat4x4 calculate_vp(struct xform view, f32 viewport_width, f32 vie
INTERNAL void send_constant_buffer_data(ID3D11Buffer *buffer, struct mat4x4 vp)
{
D3D11_MAPPED_SUBRESOURCE ms;
if (ID3D11DeviceContext_Map(L.devcon, (ID3D11Resource *)buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms) != S_OK) {
if (ID3D11DeviceContext_Map(G.devcon, (ID3D11Resource *)buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms) != S_OK) {
ASSERT(false);
return;
}
struct dx11_constant_buffer_data *data = (struct dx11_constant_buffer_data *)ms.pData;
MEMCPY(&data->vp, &vp, sizeof(vp));
ID3D11DeviceContext_Unmap(L.devcon, (ID3D11Resource *)buffer, 0);
ID3D11DeviceContext_Unmap(G.devcon, (ID3D11Resource *)buffer, 0);
}
/* ========================== *
@ -191,7 +191,7 @@ INTERNAL struct renderer_handle handle_alloc(void *data)
{
__prof;
struct handle_store *store = &L.handle_store;
struct handle_store *store = &G.handle_store;
struct handle_slot *slot = NULL;
sys_mutex_lock(&store->mutex);
@ -223,7 +223,7 @@ INTERNAL void handle_release(struct renderer_handle handle)
{
__prof;
struct handle_store *store = &L.handle_store;
struct handle_store *store = &G.handle_store;
u32 idx = HANDLE_IDX(handle);
u32 gen = HANDLE_GEN(handle);
@ -260,7 +260,7 @@ INTERNAL void *handle_data(struct renderer_handle handle)
__prof;
void *data = NULL;
struct handle_store *store = &L.handle_store;
struct handle_store *store = &G.handle_store;
u32 idx = HANDLE_IDX(handle);
u32 gen = HANDLE_GEN(handle);
if (idx < store->count) {
@ -297,8 +297,8 @@ INTERNAL void process_shader_compilation_error(ID3DBlob *error_blob)
INTERNAL void init_shader_table(void)
{
MEMZERO_ARRAY(L.shader_info);
L.shader_info[SHADER_TEXTURE] = (struct dx11_shader_desc) {
MEMZERO_ARRAY(G.shader_info);
G.shader_info[SHADER_TEXTURE] = (struct dx11_shader_desc) {
"shaders/texture.hlsl",
sizeof(struct texture_shader_vertex),
{
@ -315,7 +315,7 @@ INTERNAL void shader_init(struct dx11_shader *shader, enum shader_kind kind)
MEMZERO_STRUCT(shader);
struct temp_arena scratch = scratch_begin_no_conflict();
const struct dx11_shader_desc *shader_desc = &L.shader_info[kind];
const struct dx11_shader_desc *shader_desc = &G.shader_info[kind];
shader->kind = kind;
shader->vertex_size = shader_desc->vertex_size;
@ -331,7 +331,7 @@ INTERNAL void shader_init(struct dx11_shader *shader, enum shader_kind kind)
ID3DBlob *vs_blob, *ps_blob;
{
struct string name = string_from_cstr(shader_desc->name_cstr);
struct tar_entry *tar_entry = tar_get(&L.shaders_archive, name);
struct tar_entry *tar_entry = tar_get(&G.shaders_archive, name);
if (!tar_entry) {
sys_panic(string_format(scratch.arena,
STR("Could not find shader \"%F\""),
@ -363,11 +363,11 @@ INTERNAL void shader_init(struct dx11_shader *shader, enum shader_kind kind)
}
/* Create device layout */
ID3D11Device_CreateInputLayout(L.dev, shader_desc->input_layout_desc, elem_count, ID3D10Blob_GetBufferPointer(vs_blob), ID3D10Blob_GetBufferSize(vs_blob), &shader->input_layout);
ID3D11Device_CreateInputLayout(G.dev, shader_desc->input_layout_desc, elem_count, ID3D10Blob_GetBufferPointer(vs_blob), ID3D10Blob_GetBufferSize(vs_blob), &shader->input_layout);
/* Create shader */
ID3D11Device_CreateVertexShader(L.dev, ID3D10Blob_GetBufferPointer(vs_blob), ID3D10Blob_GetBufferSize(vs_blob), NULL, &shader->vs);
ID3D11Device_CreatePixelShader(L.dev, ID3D10Blob_GetBufferPointer(ps_blob), ID3D10Blob_GetBufferSize(ps_blob), NULL, &shader->ps);
ID3D11Device_CreateVertexShader(G.dev, ID3D10Blob_GetBufferPointer(vs_blob), ID3D10Blob_GetBufferSize(vs_blob), NULL, &shader->vs);
ID3D11Device_CreatePixelShader(G.dev, ID3D10Blob_GetBufferPointer(ps_blob), ID3D10Blob_GetBufferSize(ps_blob), NULL, &shader->ps);
ID3D10Blob_Release(vs_blob);
ID3D10Blob_Release(ps_blob);
@ -384,17 +384,17 @@ struct renderer_startup_receipt renderer_startup(struct sys_window *window)
{
__profscope(initializing_d3d11);
L.arena = arena_alloc(GIGABYTE(64));
G.arena = arena_alloc(GIGABYTE(64));
/* Allocate store */
L.handle_store.arena = arena_alloc(GIGABYTE(64));
L.handle_store.array = (struct handle_slot *)L.handle_store.arena.base;
L.handle_store.mutex = sys_mutex_alloc();
G.handle_store.arena = arena_alloc(GIGABYTE(64));
G.handle_store.array = (struct handle_slot *)G.handle_store.arena.base;
G.handle_store.mutex = sys_mutex_alloc();
/* Load shader archive */
struct buffer embedded_data = inc_shaders_tar();
if (embedded_data.size > 0) {
L.shaders_archive = tar_parse(&L.arena, embedded_data, STR("shaders/"));
G.shaders_archive = tar_parse(&G.arena, embedded_data, STR("shaders/"));
}
/* Initialize shader table */
@ -499,9 +499,9 @@ struct renderer_startup_receipt renderer_startup(struct sys_window *window)
/* TODO: Better message */
sys_panic(STR("Failed to initialize renderer"));
}
L.dev = device;
L.devcon = context;
L.swapchain = swapchain;
G.dev = device;
G.devcon = context;
G.swapchain = swapchain;
/* Create the blending setup */
{
@ -522,8 +522,8 @@ struct renderer_startup_receipt renderer_startup(struct sys_window *window)
};
/* FIXME: Free this? */
ID3D11Device_CreateBlendState(L.dev, &desc, &L.blend_state);
ID3D11DeviceContext_OMSetBlendState(L.devcon, L.blend_state, blend_factor, 0xffffffff);
ID3D11Device_CreateBlendState(G.dev, &desc, &G.blend_state);
ID3D11DeviceContext_OMSetBlendState(G.devcon, G.blend_state, blend_factor, 0xffffffff);
}
/* Create depth-stencil State */
@ -540,8 +540,8 @@ struct renderer_startup_receipt renderer_startup(struct sys_window *window)
desc.FrontFace.StencilFunc = D3D11_COMPARISON_ALWAYS;
desc.BackFace = desc.FrontFace;
/* FIXME: Free this? */
ID3D11Device_CreateDepthStencilState(L.dev, &desc, &L.depth_stencil_state);
ID3D11DeviceContext_OMSetDepthStencilState(L.devcon, L.depth_stencil_state, 0);
ID3D11Device_CreateDepthStencilState(G.dev, &desc, &G.depth_stencil_state);
ID3D11DeviceContext_OMSetDepthStencilState(G.devcon, G.depth_stencil_state, 0);
}
/* Create the rasterizer state */
@ -554,8 +554,8 @@ struct renderer_startup_receipt renderer_startup(struct sys_window *window)
.DepthClipEnable = true
};
/* FIXME: Free this? */
ID3D11Device_CreateRasterizerState(L.dev, &desc, &L.rasterizer_state);
ID3D11DeviceContext_RSSetState(L.devcon, L.rasterizer_state);
ID3D11Device_CreateRasterizerState(G.dev, &desc, &G.rasterizer_state);
ID3D11DeviceContext_RSSetState(G.devcon, G.rasterizer_state);
}
/* Create the sampler state */
@ -572,8 +572,8 @@ struct renderer_startup_receipt renderer_startup(struct sys_window *window)
.MaxLOD = D3D11_FLOAT32_MAX
};
/* FIXME: Free this? */
ID3D11Device_CreateSamplerState(L.dev, &desc, &L.sampler_state);
ID3D11DeviceContext_PSSetSamplers(L.devcon, 0, 1, &L.sampler_state);
ID3D11Device_CreateSamplerState(G.dev, &desc, &G.sampler_state);
ID3D11DeviceContext_PSSetSamplers(G.devcon, 0, 1, &G.sampler_state);
}
/* Create the constant buffer */
@ -586,7 +586,7 @@ struct renderer_startup_receipt renderer_startup(struct sys_window *window)
.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE,
.MiscFlags = 0
};
ID3D11Device_CreateBuffer(L.dev, &desc, NULL, &L.vs_constant_buffer);
ID3D11Device_CreateBuffer(G.dev, &desc, NULL, &G.vs_constant_buffer);
/* Apparently ByteWidth needs to be in multiples of 16? */
ASSERT(desc.ByteWidth % 16 == 0);
@ -595,7 +595,7 @@ struct renderer_startup_receipt renderer_startup(struct sys_window *window)
/* Init shaders */
for (u32 i = 1; i < NUM_SHADERS; ++i) {
/* Create shader */
shader_init(&L.shaders[i], i);
shader_init(&G.shaders[i], i);
}
return (struct renderer_startup_receipt) { 0 };
@ -609,8 +609,8 @@ struct renderer_canvas *renderer_canvas_alloc(void)
{
struct renderer_canvas *canvas = NULL;
for (u32 i = 0; i < MAX_CANVASES; ++i) {
if (!L.canvases[i].valid) {
canvas = &L.canvases[i];
if (!G.canvases[i].valid) {
canvas = &G.canvases[i];
break;
}
}
@ -697,7 +697,7 @@ void renderer_canvas_ensure_texture_cmd(struct renderer_canvas *canvas, struct t
/* Command parameters are not the same, insert new command */
struct renderer_cmd *cmd = arena_push(&canvas->cpu_cmd_store.arena, struct renderer_cmd);
*cmd = (struct renderer_cmd){
.shader = &L.shaders[SHADER_TEXTURE],
.shader = &G.shaders[SHADER_TEXTURE],
.texture = params.texture
};
@ -723,7 +723,7 @@ void renderer_canvas_send_to_gpu(struct renderer_canvas *canvas)
/* Create / grow vertex buffers */
for (u32 i = 1; i < ARRAY_COUNT(canvas->buffers); ++i) {
struct dx11_buffer *buffer = &canvas->buffers[i];
struct dx11_shader *shader = &L.shaders[i];
struct dx11_shader *shader = &G.shaders[i];
u32 vertex_size = shader->vertex_size;
u32 index_size = sizeof(vidx);
@ -740,7 +740,7 @@ void renderer_canvas_send_to_gpu(struct renderer_canvas *canvas)
.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE
};
/* TODO: Assert res >= 0 (success) */
ID3D11Device_CreateBuffer(L.dev, &desc, NULL, &buffer->gpu_vertex_buffer);
ID3D11Device_CreateBuffer(G.dev, &desc, NULL, &buffer->gpu_vertex_buffer);
}
/* Create / grow index buffer */
@ -753,17 +753,17 @@ void renderer_canvas_send_to_gpu(struct renderer_canvas *canvas)
.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE
};
/* TODO: Assert res >= 0 (success) */
ID3D11Device_CreateBuffer(L.dev, &desc, NULL, &buffer->gpu_index_buffer);
ID3D11Device_CreateBuffer(G.dev, &desc, NULL, &buffer->gpu_index_buffer);
}
/* Copy data to GPU */
D3D11_MAPPED_SUBRESOURCE vtx_resource, idx_resource;
ID3D11DeviceContext_Map(L.devcon, (ID3D11Resource *)buffer->gpu_vertex_buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &vtx_resource);
ID3D11DeviceContext_Map(L.devcon, (ID3D11Resource *)buffer->gpu_index_buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &idx_resource);
ID3D11DeviceContext_Map(G.devcon, (ID3D11Resource *)buffer->gpu_vertex_buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &vtx_resource);
ID3D11DeviceContext_Map(G.devcon, (ID3D11Resource *)buffer->gpu_index_buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &idx_resource);
MEMCPY(vtx_resource.pData, buffer->cpu_vertex_buffer, buffer->vertex_count * vertex_size);
MEMCPY(idx_resource.pData, buffer->cpu_index_buffer, buffer->index_count * index_size);
ID3D11DeviceContext_Unmap(L.devcon, (ID3D11Resource *)buffer->gpu_vertex_buffer, 0);
ID3D11DeviceContext_Unmap(L.devcon, (ID3D11Resource *)buffer->gpu_index_buffer, 0);
ID3D11DeviceContext_Unmap(G.devcon, (ID3D11Resource *)buffer->gpu_vertex_buffer, 0);
ID3D11DeviceContext_Unmap(G.devcon, (ID3D11Resource *)buffer->gpu_index_buffer, 0);
/* Reset CPU buffers */
buffer->vertex_count = 0;
@ -793,17 +793,17 @@ INTERNAL void resize_backbuffer(struct v2 size)
/* TODO: error handling */
/* Release all outstanding references to the swap chain's buffers. */
if (L.backbuffer_view) {
ID3D11RenderTargetView_Release(L.backbuffer_view);
if (G.backbuffer_view) {
ID3D11RenderTargetView_Release(G.backbuffer_view);
}
IDXGISwapChain_ResizeBuffers(L.swapchain, 0, (UINT)size.x, (UINT)size.y, DXGI_FORMAT_UNKNOWN, 0);
IDXGISwapChain_ResizeBuffers(G.swapchain, 0, (UINT)size.x, (UINT)size.y, DXGI_FORMAT_UNKNOWN, 0);
/* Get buffer and create a render-target-view. */
ID3D11Texture2D *backbuffer_texture = NULL;
IDXGISwapChain_GetBuffer(L.swapchain, 0, &IID_ID3D11Texture2D, (LPVOID *)&backbuffer_texture);
IDXGISwapChain_GetBuffer(G.swapchain, 0, &IID_ID3D11Texture2D, (LPVOID *)&backbuffer_texture);
ID3D11Device_CreateRenderTargetView(L.dev, (ID3D11Resource *)backbuffer_texture, NULL, &L.backbuffer_view);
ID3D11Device_CreateRenderTargetView(G.dev, (ID3D11Resource *)backbuffer_texture, NULL, &G.backbuffer_view);
ID3D11Texture2D_Release(backbuffer_texture);
}
@ -817,7 +817,7 @@ INTERNAL void resize_viewport(struct rect viewport)
.TopLeftX = viewport.x,
.TopLeftY = viewport.y
};
ID3D11DeviceContext_RSSetViewports(L.devcon, 1, &d3d11_viewport);
ID3D11DeviceContext_RSSetViewports(G.devcon, 1, &d3d11_viewport);
}
/* TODO: Lock canvas or at least global state? (in-case multi-threaded present).
@ -830,24 +830,24 @@ void renderer_canvas_present(struct renderer_canvas **canvases, u32 canvases_cou
__prof;
/* Resize back buffer */
if (!v2_eq(L.backbuffer_size, screen_size)) {
if (!v2_eq(G.backbuffer_size, screen_size)) {
resize_backbuffer(screen_size);
L.backbuffer_size = screen_size;
G.backbuffer_size = screen_size;
}
if (!rect_eq(L.viewport, viewport)) {
if (!rect_eq(G.viewport, viewport)) {
resize_viewport(viewport);
L.viewport = viewport;
G.viewport = viewport;
}
ID3D11DeviceContext_OMSetRenderTargets(L.devcon, 1, &L.backbuffer_view, NULL);
ID3D11DeviceContext_OMSetRenderTargets(G.devcon, 1, &G.backbuffer_view, NULL);
/* Clear back buffer */
f32 clear_color[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
ID3D11DeviceContext_ClearRenderTargetView(L.devcon, L.backbuffer_view, clear_color);
ID3D11DeviceContext_ClearRenderTargetView(G.devcon, G.backbuffer_view, clear_color);
/* Set draw mode */
ID3D11DeviceContext_IASetPrimitiveTopology(L.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
ID3D11DeviceContext_IASetPrimitiveTopology(G.devcon, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
for (u32 i = 0; i < canvases_count; ++i) {
struct renderer_canvas *canvas = canvases[i];
@ -856,8 +856,8 @@ void renderer_canvas_present(struct renderer_canvas **canvases, u32 canvases_cou
* NOTE: We're only doing this once per canvas, rather than once per draw call since
* the only constant right now is VP. */
struct mat4x4 vp_matrix = calculate_vp(canvas->view, viewport.width, viewport.height);
send_constant_buffer_data(L.vs_constant_buffer, vp_matrix);
ID3D11DeviceContext_VSSetConstantBuffers(L.devcon, 0, 1, &L.vs_constant_buffer);
send_constant_buffer_data(G.vs_constant_buffer, vp_matrix);
ID3D11DeviceContext_VSSetConstantBuffers(G.devcon, 0, 1, &G.vs_constant_buffer);
struct dx11_shader *last_shader = NULL;
struct renderer_handle last_texture_handle = { 0 };
@ -868,9 +868,9 @@ void renderer_canvas_present(struct renderer_canvas **canvases, u32 canvases_cou
/* Activate shader */
if (shader != last_shader) {
ID3D11DeviceContext_VSSetShader(L.devcon, shader->vs, 0, 0);
ID3D11DeviceContext_PSSetShader(L.devcon, shader->ps, 0, 0);
ID3D11DeviceContext_IASetInputLayout(L.devcon, shader->input_layout);
ID3D11DeviceContext_VSSetShader(G.devcon, shader->vs, 0, 0);
ID3D11DeviceContext_PSSetShader(G.devcon, shader->ps, 0, 0);
ID3D11DeviceContext_IASetInputLayout(G.devcon, shader->input_layout);
last_shader = shader;
}
@ -878,7 +878,7 @@ void renderer_canvas_present(struct renderer_canvas **canvases, u32 canvases_cou
/* Activate texture */
if (!handle_eq(texture_handle, last_texture_handle)) {
ID3D11ShaderResourceView *texture_srv = handle_data(texture_handle);
ID3D11DeviceContext_PSSetShaderResources(L.devcon, 0, 1, &texture_srv);
ID3D11DeviceContext_PSSetShaderResources(G.devcon, 0, 1, &texture_srv);
last_texture_handle = texture_handle;
}
@ -889,18 +889,18 @@ void renderer_canvas_present(struct renderer_canvas **canvases, u32 canvases_cou
/* Activate buffer */
u32 zero = 0;
UINT vertex_stride = shader->vertex_size;
ID3D11DeviceContext_IASetVertexBuffers(L.devcon, 0, 1, &buffer->gpu_vertex_buffer, &vertex_stride, &zero);
ID3D11DeviceContext_IASetIndexBuffer(L.devcon, buffer->gpu_index_buffer, DXGI_FORMAT_R32_UINT, zero);
ID3D11DeviceContext_IASetVertexBuffers(G.devcon, 0, 1, &buffer->gpu_vertex_buffer, &vertex_stride, &zero);
ID3D11DeviceContext_IASetIndexBuffer(G.devcon, buffer->gpu_index_buffer, DXGI_FORMAT_R32_UINT, zero);
/* Draw */
ID3D11DeviceContext_DrawIndexed(L.devcon, index_count, index_offset, vertex_offset);
ID3D11DeviceContext_DrawIndexed(G.devcon, index_count, index_offset, vertex_offset);
}
}
/* Present */
{
__profscope(IDXGISwapchain_Present);
IDXGISwapChain1_Present(L.swapchain, vsync, 0);
IDXGISwapChain1_Present(G.swapchain, vsync, 0);
__profframe(0);
}
renderer_capture_image_for_profiler(viewport.width, viewport.height);
@ -932,7 +932,7 @@ struct renderer_handle renderer_texture_alloc(struct image_rgba data)
.SysMemPitch = data.width * 4,
.SysMemSlicePitch = 0
};
ID3D11Device_CreateTexture2D(L.dev, &desc, &subresource_data, &texture);
ID3D11Device_CreateTexture2D(G.dev, &desc, &subresource_data, &texture);
/* Create srv */
ID3D11ShaderResourceView *texture_srv = NULL;
@ -943,7 +943,7 @@ struct renderer_handle renderer_texture_alloc(struct image_rgba data)
.Texture2D.MipLevels = desc.MipLevels,
.Texture2D.MostDetailedMip = 0
};
ID3D11Device_CreateShaderResourceView(L.dev, (ID3D11Resource *)texture, &shader_resource_view_desc, &texture_srv);
ID3D11Device_CreateShaderResourceView(G.dev, (ID3D11Resource *)texture, &shader_resource_view_desc, &texture_srv);
ID3D11Texture2D_Release(texture);
}
ASSERT(texture_srv != NULL);
@ -995,7 +995,7 @@ INTERNAL void renderer_capture_image_for_profiler(f32 width, f32 height)
static b32 ready_to_read = false;
ID3D11Texture2D *backbuffer = NULL;
IDXGISwapChain_GetBuffer(L.swapchain, 0, &IID_ID3D11Texture2D, (LPVOID *)&backbuffer);
IDXGISwapChain_GetBuffer(G.swapchain, 0, &IID_ID3D11Texture2D, (LPVOID *)&backbuffer);
struct prof_cap *write_cap = &staging_caps[cap_index];
*write_cap = (struct prof_cap) { .size = V2(width, height) };
@ -1005,10 +1005,10 @@ INTERNAL void renderer_capture_image_for_profiler(f32 width, f32 height)
staging_desc.Usage = D3D11_USAGE_STAGING;
staging_desc.BindFlags = 0;
staging_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
ID3D11Device_CreateTexture2D(L.dev, &staging_desc, NULL, &write_cap->texture);
ID3D11Device_CreateTexture2D(G.dev, &staging_desc, NULL, &write_cap->texture);
}
ID3D11DeviceContext_CopyResource(L.devcon, (ID3D11Resource *)write_cap->texture, (ID3D11Resource *)backbuffer);
ID3D11DeviceContext_CopyResource(G.devcon, (ID3D11Resource *)write_cap->texture, (ID3D11Resource *)backbuffer);
ID3D11Texture2D_Release(backbuffer);
++cap_index;
@ -1021,7 +1021,7 @@ INTERNAL void renderer_capture_image_for_profiler(f32 width, f32 height)
struct prof_cap *read_cap = &staging_caps[cap_index];
{
D3D11_MAPPED_SUBRESOURCE res;
ID3D11DeviceContext_Map(L.devcon, (ID3D11Resource *)read_cap->texture, 0, D3D11_MAP_READ, 0, &res);
ID3D11DeviceContext_Map(G.devcon, (ID3D11Resource *)read_cap->texture, 0, D3D11_MAP_READ, 0, &res);
u32 final_width = CAP_WIDTH;
u32 final_height = CAP_HEIGHT;
f32 width_frequency = (f32)read_cap->size.x / (f32)final_width;
@ -1048,7 +1048,7 @@ INTERNAL void renderer_capture_image_for_profiler(f32 width, f32 height)
scratch_end(scratch);
}
ID3D11DeviceContext_Unmap(L.devcon, (ID3D11Resource *)read_cap->texture, 0);
ID3D11DeviceContext_Unmap(G.devcon, (ID3D11Resource *)read_cap->texture, 0);
}
ID3D11Texture2D_Release(read_cap->texture);
}

View File

@ -12,7 +12,7 @@
GLOBAL struct {
struct arena arena;
struct tar_archive archive;
} L = { 0 }, DEBUG_ALIAS(L, L_resource);
} G = { 0 }, DEBUG_ALIAS(G, G_resource);
#endif
struct resource_startup_receipt resource_startup(void)
@ -20,11 +20,11 @@ struct resource_startup_receipt resource_startup(void)
#if RESOURCES_EMBEDDED
struct buffer embedded_data = inc_res_tar();
//struct buffer embedded_data = ((struct buffer) { (u8 *)(_incbin_res_tar_end) - (u8 *)(_incbin_res_tar_start), (u8 *)_incbin_res_tar_start });;
L.arena = arena_alloc(GIGABYTE(64));
G.arena = arena_alloc(GIGABYTE(64));
if (embedded_data.size <= 0) {
sys_panic(STR("No embedded resources found"));
}
L.archive = tar_parse(&L.arena, embedded_data, STR("res/"));
G.archive = tar_parse(&G.arena, embedded_data, STR("res/"));
#else
/* Ensure we have the right working directory */
if (!sys_is_dir(STR("res"))) {
@ -39,7 +39,7 @@ b32 resource_exists(struct string path)
{
__prof;
#if RESOURCES_EMBEDDED
struct tar_entry *entry = tar_get(&L.archive, path);
struct tar_entry *entry = tar_get(&G.archive, path);
return entry && !entry->is_dir;
#else
return sys_is_file(path);
@ -50,7 +50,7 @@ struct resource resource_open(struct string path)
{
__prof;
#if RESOURCES_EMBEDDED
struct tar_entry *entry = tar_get(&L.archive, path);
struct tar_entry *entry = tar_get(&G.archive, path);
return (struct resource) {
.bytes = entry ? entry->buff : BUFFER(0, 0)
};

View File

@ -29,7 +29,7 @@ struct sheet_task_params_store {
GLOBAL struct {
struct sheet_task_params_store params;
} L = { 0 }, DEBUG_ALIAS(L, L_sheet);
} G = { 0 }, DEBUG_ALIAS(G, G_sheet);
/* ========================== *
@ -44,8 +44,8 @@ struct sheet_startup_receipt sheet_startup(struct work_startup_receipt *work_sr,
(UNUSED)asset_cache_sr;
(UNUSED)resource_sr;
L.params.arena = arena_alloc(GIGABYTE(64));
L.params.mutex = sys_mutex_alloc();
G.params.arena = arena_alloc(GIGABYTE(64));
G.params.mutex = sys_mutex_alloc();
return (struct sheet_startup_receipt) { 0 };
}
@ -57,27 +57,27 @@ struct sheet_startup_receipt sheet_startup(struct work_startup_receipt *work_sr,
INTERNAL struct sheet_task_params *sheet_task_params_alloc(void)
{
struct sheet_task_params *p = NULL;
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct sheet_task_params);
p = arena_push_zero(&G.params.arena, struct sheet_task_params);
}
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
return p;
}
INTERNAL void sheet_task_params_release(struct sheet_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
p->next_free = L.params.head_free;
L.params.head_free = p;
p->next_free = G.params.head_free;
G.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
}
/* ========================== *

View File

@ -29,7 +29,7 @@ struct sound_task_params_store {
GLOBAL struct {
struct sound_task_params_store params;
} L = { 0 }, DEBUG_ALIAS(L, L_sound);
} G = { 0 }, DEBUG_ALIAS(G, G_sound);
/* ========================== *
* Startup
@ -43,8 +43,8 @@ struct sound_startup_receipt sound_startup(struct work_startup_receipt *work_sr,
(UNUSED)asset_cache_sr;
(UNUSED)resource_sr;
L.params.arena = arena_alloc(GIGABYTE(64));
L.params.mutex = sys_mutex_alloc();
G.params.arena = arena_alloc(GIGABYTE(64));
G.params.mutex = sys_mutex_alloc();
return (struct sound_startup_receipt) { 0 };
}
@ -57,28 +57,28 @@ INTERNAL struct sound_task_params *sound_task_params_alloc(void)
{
struct sound_task_params *p = NULL;
{
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct sound_task_params);
p = arena_push_zero(&G.params.arena, struct sound_task_params);
}
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
}
return p;
}
INTERNAL void sound_task_params_release(struct sound_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
p->next_free = L.params.head_free;
L.params.head_free = p;
p->next_free = G.params.head_free;
G.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
}
/* ========================== *

View File

@ -116,7 +116,7 @@ GLOBAL struct {
struct sys_mutex windows_mutex;
struct arena windows_arena;
struct win32_window *first_free_window;
} L = { 0 }, DEBUG_ALIAS(L, L_sys_win32);
} G = { 0 }, DEBUG_ALIAS(G, G_sys_win32);
/* ========================== *
* Events
@ -125,45 +125,45 @@ GLOBAL struct {
/* https://git.rfleury.com/community/root_basic/src/commit/9b49fcd24e0c3f875b7c213e81a219bf8544bddb/code/os/gfx/win32/os_gfx_win32.c#L193 */
INTERNAL void win32_init_vk_btn_table(void)
{
MEMZERO_ARRAY(L.vk_btn_table);
MEMZERO_ARRAY(G.vk_btn_table);
for (u32 i = 'A', j = SYS_BTN_A; i <= 'Z'; i += 1, j += 1) {
L.vk_btn_table[i] = (enum sys_btn)j;
G.vk_btn_table[i] = (enum sys_btn)j;
}
for (u32 i = '0', j = SYS_BTN_0; i <= '9'; i += 1, j += 1) {
L.vk_btn_table[i] = (enum sys_btn)j;
G.vk_btn_table[i] = (enum sys_btn)j;
}
for (u32 i = VK_F1, j = SYS_BTN_F1; i <= VK_F24; i += 1, j += 1) {
L.vk_btn_table[i] = (enum sys_btn)j;
G.vk_btn_table[i] = (enum sys_btn)j;
}
L.vk_btn_table[VK_ESCAPE] = SYS_BTN_ESC;
L.vk_btn_table[VK_OEM_3] = SYS_BTN_GRAVE_ACCENT;
L.vk_btn_table[VK_OEM_MINUS] = SYS_BTN_MINUS;
L.vk_btn_table[VK_OEM_PLUS] = SYS_BTN_EQUAL;
L.vk_btn_table[VK_BACK] = SYS_BTN_BACKSPACE;
L.vk_btn_table[VK_TAB] = SYS_BTN_TAB;
L.vk_btn_table[VK_SPACE] = SYS_BTN_SPACE;
L.vk_btn_table[VK_RETURN] = SYS_BTN_ENTER;
L.vk_btn_table[VK_CONTROL] = SYS_BTN_CTRL;
L.vk_btn_table[VK_SHIFT] = SYS_BTN_SHIFT;
L.vk_btn_table[VK_MENU] = SYS_BTN_ALT;
L.vk_btn_table[VK_UP] = SYS_BTN_UP;
L.vk_btn_table[VK_LEFT] = SYS_BTN_LEFT;
L.vk_btn_table[VK_DOWN] = SYS_BTN_DOWN;
L.vk_btn_table[VK_RIGHT] = SYS_BTN_RIGHT;
L.vk_btn_table[VK_DELETE] = SYS_BTN_DELETE;
L.vk_btn_table[VK_PRIOR] = SYS_BTN_PAGE_UP;
L.vk_btn_table[VK_NEXT] = SYS_BTN_PAGE_DOWN;
L.vk_btn_table[VK_HOME] = SYS_BTN_HOME;
L.vk_btn_table[VK_END] = SYS_BTN_END;
L.vk_btn_table[VK_OEM_2] = SYS_BTN_FORWARD_SLASH;
L.vk_btn_table[VK_OEM_PERIOD] = SYS_BTN_PERIOD;
L.vk_btn_table[VK_OEM_COMMA] = SYS_BTN_COMMA;
L.vk_btn_table[VK_OEM_7] = SYS_BTN_QUOTE;
L.vk_btn_table[VK_OEM_4] = SYS_BTN_LEFT_BRACKET;
L.vk_btn_table[VK_OEM_6] = SYS_BTN_RIGHT_BRACKET;
L.vk_btn_table[VK_INSERT] = SYS_BTN_INSERT;
L.vk_btn_table[VK_OEM_1] = SYS_BTN_SEMICOLON;
G.vk_btn_table[VK_ESCAPE] = SYS_BTN_ESC;
G.vk_btn_table[VK_OEM_3] = SYS_BTN_GRAVE_ACCENT;
G.vk_btn_table[VK_OEM_MINUS] = SYS_BTN_MINUS;
G.vk_btn_table[VK_OEM_PLUS] = SYS_BTN_EQUAL;
G.vk_btn_table[VK_BACK] = SYS_BTN_BACKSPACE;
G.vk_btn_table[VK_TAB] = SYS_BTN_TAB;
G.vk_btn_table[VK_SPACE] = SYS_BTN_SPACE;
G.vk_btn_table[VK_RETURN] = SYS_BTN_ENTER;
G.vk_btn_table[VK_CONTROL] = SYS_BTN_CTRL;
G.vk_btn_table[VK_SHIFT] = SYS_BTN_SHIFT;
G.vk_btn_table[VK_MENU] = SYS_BTN_ALT;
G.vk_btn_table[VK_UP] = SYS_BTN_UP;
G.vk_btn_table[VK_LEFT] = SYS_BTN_LEFT;
G.vk_btn_table[VK_DOWN] = SYS_BTN_DOWN;
G.vk_btn_table[VK_RIGHT] = SYS_BTN_RIGHT;
G.vk_btn_table[VK_DELETE] = SYS_BTN_DELETE;
G.vk_btn_table[VK_PRIOR] = SYS_BTN_PAGE_UP;
G.vk_btn_table[VK_NEXT] = SYS_BTN_PAGE_DOWN;
G.vk_btn_table[VK_HOME] = SYS_BTN_HOME;
G.vk_btn_table[VK_END] = SYS_BTN_END;
G.vk_btn_table[VK_OEM_2] = SYS_BTN_FORWARD_SLASH;
G.vk_btn_table[VK_OEM_PERIOD] = SYS_BTN_PERIOD;
G.vk_btn_table[VK_OEM_COMMA] = SYS_BTN_COMMA;
G.vk_btn_table[VK_OEM_7] = SYS_BTN_QUOTE;
G.vk_btn_table[VK_OEM_4] = SYS_BTN_LEFT_BRACKET;
G.vk_btn_table[VK_OEM_6] = SYS_BTN_RIGHT_BRACKET;
G.vk_btn_table[VK_INSERT] = SYS_BTN_INSERT;
G.vk_btn_table[VK_OEM_1] = SYS_BTN_SEMICOLON;
}
/* ========================== *
@ -569,7 +569,7 @@ INTERNAL HWND win32_create_window(struct win32_window *window)
/* TODO: Check for hwnd success */
HWND hwnd = CreateWindowExW(
exstyle,
L.window_class.lpszClassName,
G.window_class.lpszClassName,
L"",
WS_OVERLAPPEDWINDOW,
CW_USEDEFAULT,
@ -578,7 +578,7 @@ INTERNAL HWND win32_create_window(struct win32_window *window)
CW_USEDEFAULT,
NULL,
NULL,
L.window_class.hInstance,
G.window_class.hInstance,
NULL
);
@ -675,16 +675,16 @@ INTERNAL struct win32_window *win32_window_alloc(void)
{
struct win32_window *window = NULL;
sys_mutex_lock(&L.windows_mutex);
sys_mutex_lock(&G.windows_mutex);
{
if (L.first_free_window) {
window = L.first_free_window;
L.first_free_window = window->next_free;
if (G.first_free_window) {
window = G.first_free_window;
G.first_free_window = window->next_free;
} else {
window = arena_push(&L.windows_arena, struct win32_window);
window = arena_push(&G.windows_arena, struct win32_window);
}
}
sys_mutex_unlock(&L.windows_mutex);
sys_mutex_unlock(&G.windows_mutex);
MEMZERO_STRUCT(window);
/* Allocate sync flag */
@ -705,10 +705,10 @@ INTERNAL struct win32_window *win32_window_alloc(void)
INTERNAL void win32_window_release(struct win32_window *window)
{
sys_mutex_lock(&L.windows_mutex);
sys_mutex_lock(&G.windows_mutex);
{
window->next_free = L.first_free_window;
L.first_free_window = window;
window->next_free = G.first_free_window;
G.first_free_window = window;
/* Stop window thread */
window->event_thread_shutdown = true;
@ -721,7 +721,7 @@ INTERNAL void win32_window_release(struct win32_window *window)
/* Release sync flag */
sync_flag_release(&window->ready_sf);
}
sys_mutex_unlock(&L.windows_mutex);
sys_mutex_unlock(&G.windows_mutex);
}
INTERNAL void win32_update_window_from_system(struct win32_window *window)
@ -900,8 +900,8 @@ INTERNAL LRESULT CALLBACK win32_window_proc(HWND hwnd, UINT msg, WPARAM wparam,
}
enum sys_btn button = SYS_BTN_NONE;
if (vk_code < ARRAY_COUNT(L.vk_btn_table)) {
button = L.vk_btn_table[vk_code];
if (vk_code < ARRAY_COUNT(G.vk_btn_table)) {
button = G.vk_btn_table[vk_code];
}
win32_window_process_event(
@ -1338,16 +1338,16 @@ INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void)
__prof;
struct win32_condition_variable *cv = NULL;
{
sys_mutex_lock(&L.condition_variables_mutex);
sys_mutex_lock(&G.condition_variables_mutex);
{
if (L.first_free_condition_variable) {
cv = L.first_free_condition_variable;
L.first_free_condition_variable = cv->next_free;
if (G.first_free_condition_variable) {
cv = G.first_free_condition_variable;
G.first_free_condition_variable = cv->next_free;
} else {
cv = arena_push_zero(&L.condition_variables_arena, struct win32_condition_variable);
cv = arena_push_zero(&G.condition_variables_arena, struct win32_condition_variable);
}
}
sys_mutex_unlock(&L.condition_variables_mutex);
sys_mutex_unlock(&G.condition_variables_mutex);
}
MEMZERO_STRUCT(cv);
@ -1359,12 +1359,12 @@ INTERNAL struct win32_condition_variable *win32_condition_variable_alloc(void)
INTERNAL void win32_condition_variable_release(struct win32_condition_variable *w32cv)
{
__prof;
sys_mutex_lock(&L.condition_variables_mutex);
sys_mutex_lock(&G.condition_variables_mutex);
{
w32cv->next_free = L.first_free_condition_variable;
L.first_free_condition_variable = w32cv;
w32cv->next_free = G.first_free_condition_variable;
G.first_free_condition_variable = w32cv;
}
sys_mutex_unlock(&L.condition_variables_mutex);
sys_mutex_unlock(&G.condition_variables_mutex);
}
@ -1461,13 +1461,13 @@ struct win32_tls {
INTERNAL void win32_thread_set_tls(struct win32_tls *ctx)
{
/* TODO: Fail if error */
TlsSetValue(L.thread_tls_index, (LPVOID)ctx);
TlsSetValue(G.thread_tls_index, (LPVOID)ctx);
}
INTERNAL struct win32_tls *win32_thread_get_tls(void)
{
/* TODO: Fail if error */
return TlsGetValue(L.thread_tls_index);
return TlsGetValue(G.thread_tls_index);
}
INTERNAL struct win32_tls win32_tls_alloc(void)
@ -1497,28 +1497,28 @@ struct thread_local_store *sys_thread_get_thread_local_store(void)
INTERNAL struct win32_thread_params *thread_params_alloc(void)
{
struct win32_thread_params *tp = NULL;
sys_mutex_lock(&L.thread_params_mutex);
sys_mutex_lock(&G.thread_params_mutex);
{
if (L.first_free_thread_params) {
tp = L.first_free_thread_params;
L.first_free_thread_params = tp->next_free;
if (G.first_free_thread_params) {
tp = G.first_free_thread_params;
G.first_free_thread_params = tp->next_free;
} else {
tp = arena_push(&L.thread_params_arena, struct win32_thread_params);
tp = arena_push(&G.thread_params_arena, struct win32_thread_params);
}
}
sys_mutex_unlock(&L.thread_params_mutex);
sys_mutex_unlock(&G.thread_params_mutex);
MEMZERO_STRUCT(tp);
return tp;
}
INTERNAL void thread_params_release(struct win32_thread_params *tp)
{
sys_mutex_lock(&L.thread_params_mutex);
sys_mutex_lock(&G.thread_params_mutex);
{
tp->next_free = L.first_free_thread_params;
L.first_free_thread_params = tp;
tp->next_free = G.first_free_thread_params;
G.first_free_thread_params = tp;
}
sys_mutex_unlock(&L.thread_params_mutex);
sys_mutex_unlock(&G.thread_params_mutex);
}
INTERNAL DWORD WINAPI win32_thread_proc(LPVOID params)
@ -1668,7 +1668,7 @@ sys_timestamp_t sys_timestamp(void)
{
LARGE_INTEGER time;
QueryPerformanceCounter(&time);
return (u64)_win32_i64_muldiv(time.QuadPart - L.timer_start.QuadPart, 1000000000, L.timer_frequency.QuadPart);
return (u64)_win32_i64_muldiv(time.QuadPart - G.timer_start.QuadPart, 1000000000, G.timer_frequency.QuadPart);
}
f64 sys_timestamp_seconds(sys_timestamp_t ts)
@ -1792,8 +1792,8 @@ INTERNAL void win32_precise_sleep_timer(f64 seconds, HANDLE timer)
/* TODO: Does the high frequency timer even require setting / scaling of
* timeBeginPeriod/scheduler_period_ms? There isn't much documentation. */
i64 qpc_per_second = L.timer_frequency.QuadPart;;
i32 scheduler_period_ms = L.scheduler_period_ms;
i64 qpc_per_second = G.timer_frequency.QuadPart;
i32 scheduler_period_ms = G.scheduler_period_ms;
LARGE_INTEGER qpc;
QueryPerformanceCounter(&qpc);
@ -1832,8 +1832,8 @@ INTERNAL void win32_precise_sleep_timer(f64 seconds, HANDLE timer)
INTERNAL void win32_precise_sleep_legacy(f64 seconds)
{
__prof;
i64 qpc_per_second = L.timer_frequency.QuadPart;
i32 scheduler_period_ms = L.scheduler_period_ms;
i64 qpc_per_second = G.timer_frequency.QuadPart;
i32 scheduler_period_ms = G.scheduler_period_ms;
LARGE_INTEGER qpc;
QueryPerformanceCounter(&qpc);
@ -1915,33 +1915,33 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
SetThreadDescription(GetCurrentThread(), L"Main thread");
/* Query system info */
GetSystemInfo(&L.info);
QueryPerformanceFrequency(&L.timer_frequency);
QueryPerformanceCounter(&L.timer_start);
GetSystemInfo(&G.info);
QueryPerformanceFrequency(&G.timer_frequency);
QueryPerformanceCounter(&G.timer_start);
{
TIMECAPS caps;
timeGetDevCaps(&caps, sizeof caps);
L.scheduler_period_ms = (i32)caps.wPeriodMin;
G.scheduler_period_ms = (i32)caps.wPeriodMin;
}
/* Set up timing period */
timeBeginPeriod(L.scheduler_period_ms);
timeBeginPeriod(G.scheduler_period_ms);
/* Set up condition variables */
L.condition_variables_mutex = sys_mutex_alloc();
L.condition_variables_arena = arena_alloc(GIGABYTE(64));
G.condition_variables_mutex = sys_mutex_alloc();
G.condition_variables_arena = arena_alloc(GIGABYTE(64));
/* Set up threads */
L.thread_params_mutex = sys_mutex_alloc();
L.thread_params_arena = arena_alloc(GIGABYTE(64));
G.thread_params_mutex = sys_mutex_alloc();
G.thread_params_arena = arena_alloc(GIGABYTE(64));
/* Set up windows */
L.windows_mutex = sys_mutex_alloc();
L.windows_arena = arena_alloc(GIGABYTE(64));
G.windows_mutex = sys_mutex_alloc();
G.windows_arena = arena_alloc(GIGABYTE(64));
/* Set up TLS index */
L.thread_tls_index = TlsAlloc();
if (L.thread_tls_index == TLS_OUT_OF_INDEXES) {
G.thread_tls_index = TlsAlloc();
if (G.thread_tls_index == TLS_OUT_OF_INDEXES) {
/* TODO: GetLastError */
error_msg = L"Platform initialization error: TLS_OUT_OF_INDEXES";
goto abort;
@ -1953,7 +1953,7 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Create window class */
{
/* Register the window class */
WNDCLASSEXW *wc = &L.window_class;
WNDCLASSEXW *wc = &G.window_class;
wc->cbSize = sizeof(WNDCLASSEX);
wc->lpszClassName = WINDOW_CLASS_NAME;
wc->hCursor = LoadCursor(NULL, IDC_ARROW);

View File

@ -32,7 +32,7 @@ struct texture_task_params_store {
GLOBAL struct {
struct texture_task_params_store params;
} L = { 0 }, DEBUG_ALIAS(L, L_texture);
} G = { 0 }, DEBUG_ALIAS(G, G_texture);
/* ========================== *
* Startup
@ -48,8 +48,8 @@ struct texture_startup_receipt texture_startup(struct work_startup_receipt *work
(UNUSED)asset_cache_sr;
(UNUSED)resource_sr;
L.params.arena = arena_alloc(GIGABYTE(64));
L.params.mutex = sys_mutex_alloc();
G.params.arena = arena_alloc(GIGABYTE(64));
G.params.mutex = sys_mutex_alloc();
return (struct texture_startup_receipt) { 0 };
}
@ -62,28 +62,28 @@ INTERNAL struct texture_task_params *texture_task_params_alloc(void)
{
struct texture_task_params *p = NULL;
{
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
if (L.params.head_free) {
p = L.params.head_free;
L.params.head_free = p->next_free;
if (G.params.head_free) {
p = G.params.head_free;
G.params.head_free = p->next_free;
} else {
p = arena_push_zero(&L.params.arena, struct texture_task_params);
p = arena_push_zero(&G.params.arena, struct texture_task_params);
}
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
}
return p;
}
INTERNAL void texture_task_params_release(struct texture_task_params *p)
{
sys_mutex_lock(&L.params.mutex);
sys_mutex_lock(&G.params.mutex);
{
p->next_free = L.params.head_free;
L.params.head_free = p;
p->next_free = G.params.head_free;
G.params.head_free = p;
}
sys_mutex_unlock(&L.params.mutex);
sys_mutex_unlock(&G.params.mutex);
}
/* ========================== *

View File

@ -13,11 +13,11 @@ GLOBAL struct {
struct atomic_i64 metas_lock_flag;
u64 metas_count;
struct thread_local_var_meta metas[MAX_THREAD_LOCAL_VARS];
} L = { 0 }, DEBUG_ALIAS(L, L_thread_local);
} G = { 0 }, DEBUG_ALIAS(G, G_thread_local);
INTERNAL void metas_lock(void)
{
while (atomic_i64_eval_compare_exchange(&L.metas_lock_flag, 0, 1) == 0) {
while (atomic_i64_eval_compare_exchange(&G.metas_lock_flag, 0, 1) == 0) {
/* Spinlock */
ix_pause();
}
@ -25,7 +25,7 @@ INTERNAL void metas_lock(void)
INTERNAL void metas_unlock(void)
{
atomic_i64_eval_exchange(&L.metas_lock_flag, 0);
atomic_i64_eval_exchange(&G.metas_lock_flag, 0);
}
struct thread_local_store thread_local_store_alloc(void)
@ -47,7 +47,7 @@ void thread_local_store_release(struct thread_local_store *t)
for (u64 i = t->allocation_order_count; i-- > 0;) {
u64 id = t->allocation_order[i];
void *data = t->lookup[id];
struct thread_local_var_meta *meta = &L.metas[id];
struct thread_local_var_meta *meta = &G.metas[id];
if (meta->release) {
meta->release(data);
}
@ -70,12 +70,12 @@ void *_thread_local_eval(struct thread_local_var_meta *meta)
{
id_plus_one = atomic_u64_eval(&meta->id_plus_one); /* Re-check now that locked */
if (id_plus_one == 0) {
id = L.metas_count++;
id = G.metas_count++;
if (id >= MAX_THREAD_LOCAL_VARS) {
sys_panic_raw("Maximum number of thread local variables reached");
}
atomic_u64_eval_exchange(&meta->id_plus_one, id + 1);
L.metas[id] = *meta;
G.metas[id] = *meta;
} else {
id = id_plus_one - 1;
}

View File

@ -31,7 +31,7 @@ extern "C"
GLOBAL struct {
/* FIXME: Do we need to wrap this in a mutex? */
IDWriteFactory5 *factory;
} L = { 0 }, DEBUG_ALIAS(L, L_ttf_dwrite);
} G = { 0 }, DEBUG_ALIAS(G, G_ttf_dwrite);
/* ========================== *
* Decode font
@ -49,7 +49,7 @@ INTERNAL i32 round_up(f32 x)
/* Call this during font system startup */
struct ttf_startup_receipt ttf_startup(void)
{
ASSERT(!L.factory);
ASSERT(!G.factory);
/* FIXME: I think IDWriteFactory5 only exists on later updates of windows
* 10? Need to verify. Maybe should just use a custom loader. (We're only
* using a factory5 since I think WriteInMemoryFileLoader wasn't
@ -59,7 +59,7 @@ struct ttf_startup_receipt ttf_startup(void)
HRESULT error = DWriteCreateFactory(
DWRITE_FACTORY_TYPE_SHARED,
__uuidof(IDWriteFactory5),
(IUnknown **)&L.factory
(IUnknown **)&G.factory
);
#pragma clang diagnostic pop
if (error) {
@ -74,7 +74,7 @@ struct ttf_decode_result ttf_decode(struct arena *arena, struct buffer encoded,
COLORREF bg_color = RGB(0,0,0);
COLORREF fg_color = RGB(255,255,255);
IDWriteFactory5 *factory = L.factory;
IDWriteFactory5 *factory = G.factory;
/* TODO: handle errors */
HRESULT error = 0;
@ -335,7 +335,7 @@ extern "C"
GLOBAL struct {
/* FIXME: Do we need to wrap this in a mutex? */
IDWriteFactory5 *factory;
} L = { 0 }, DEBUG_ALIAS(L, L_ttf_dwrite);
} G = { 0 }, DEBUG_ALIAS(G, G_ttf_dwrite);
/* ========================== *
* Decode font
@ -353,7 +353,7 @@ INTERNAL i32 round_up(f32 x)
/* Call this during font system startup */
struct ttf_startup_receipt ttf_startup(void)
{
ASSERT(!L.factory);
ASSERT(!G.factory);
/* FIXME: I think IDWriteFactory5 only exists on later updates of windows
* 10? Need to verify. Maybe should just use a custom loader. (We're only
* using a factory5 since I think WriteInMemoryFileLoader wasn't
@ -363,7 +363,7 @@ struct ttf_startup_receipt ttf_startup(void)
HRESULT error = DWriteCreateFactory(
DWRITE_FACTORY_TYPE_SHARED,
__uuidof(IDWriteFactory5),
(IUnknown **)&L.factory
(IUnknown **)&G.factory
);
#pragma clang diagnostic pop
if (error) {
@ -378,7 +378,7 @@ struct ttf_decode_result ttf_decode(struct arena *arena, struct buffer encoded,
COLORREF bg_color = RGB(0,0,0);
COLORREF fg_color = RGB(255,255,255);
IDWriteFactory5 *factory = L.factory;
IDWriteFactory5 *factory = G.factory;
/* TODO: handle errors */
HRESULT error = 0;

View File

@ -85,7 +85,7 @@ GLOBAL struct {
struct v2 viewport_center;
struct v2 viewport_cursor;
struct v2 world_cursor;
} L = { 0 }, DEBUG_ALIAS(L, L_user);
} G = { 0 }, DEBUG_ALIAS(G, G_user);
/* ========================== *
* Bind state
@ -118,26 +118,26 @@ GLOBAL READONLY enum user_bind_kind g_binds[SYS_BTN_COUNT] = {
INTERNAL struct sys_event_array pop_sys_events(struct arena *arena)
{
struct sys_event_array array = { 0 };
sys_mutex_lock(&L.sys_events_mutex);
sys_mutex_lock(&G.sys_events_mutex);
{
struct buffer events_buff = arena_to_buffer(&L.sys_events_arena);
struct buffer events_buff = arena_to_buffer(&G.sys_events_arena);
arena_align(arena, alignof(struct sys_event));
array.events = (struct sys_event *)arena_push_array(arena, u8, events_buff.size);
array.count = events_buff.size / sizeof(struct sys_event);
MEMCPY(array.events, events_buff.data, events_buff.size);
arena_reset(&L.sys_events_arena);
arena_reset(&G.sys_events_arena);
}
sys_mutex_unlock(&L.sys_events_mutex);
sys_mutex_unlock(&G.sys_events_mutex);
return array;
}
INTERNAL SYS_WINDOW_EVENT_CALLBACK_DEF(window_event_callback, event)
{
sys_mutex_lock(&L.sys_events_mutex);
sys_mutex_lock(&G.sys_events_mutex);
{
*arena_push(&L.sys_events_arena, struct sys_event) = event;
*arena_push(&G.sys_events_arena, struct sys_event) = event;
}
sys_mutex_unlock(&L.sys_events_mutex);
sys_mutex_unlock(&G.sys_events_mutex);
}
/* ========================== *
@ -147,21 +147,21 @@ INTERNAL SYS_WINDOW_EVENT_CALLBACK_DEF(window_event_callback, event)
INTERNAL struct blend_tick *blend_tick_alloc(void)
{
struct blend_tick *bt = NULL;
if (L.head_free_blend_tick) {
bt = L.head_free_blend_tick;
L.head_free_blend_tick = bt->next;
if (G.head_free_blend_tick) {
bt = G.head_free_blend_tick;
G.head_free_blend_tick = bt->next;
*bt = (struct blend_tick) {
.world = bt->world
};
} else {
bt = arena_push_zero(&L.arena, struct blend_tick);
bt = arena_push_zero(&G.arena, struct blend_tick);
world_alloc(&bt->world);
}
if (L.head_blend_tick) {
bt->next = L.head_blend_tick;
L.head_blend_tick->prev = bt;
if (G.head_blend_tick) {
bt->next = G.head_blend_tick;
G.head_blend_tick->prev = bt;
}
L.head_blend_tick = bt;
G.head_blend_tick = bt;
return bt;
}
@ -177,14 +177,14 @@ INTERNAL void blend_tick_release(struct blend_tick *bt)
if (prev) {
prev->next = next;
}
if (bt == L.head_blend_tick) {
L.head_blend_tick = next;
if (bt == G.head_blend_tick) {
G.head_blend_tick = next;
}
/* Add to free list */
bt->next = L.head_free_blend_tick;
bt->next = G.head_free_blend_tick;
bt->prev = NULL;
L.head_free_blend_tick = bt;
G.head_free_blend_tick = bt;
}
struct interp_ticks {
@ -198,7 +198,7 @@ INTERNAL struct interp_ticks pull_ticks(f64 blend_time)
/* Find newest stored tick */
struct world *newest_tick = NULL;
for (struct blend_tick *bt = L.head_blend_tick; bt; bt = bt->next) {
for (struct blend_tick *bt = G.head_blend_tick; bt; bt = bt->next) {
if (!newest_tick || bt->world.tick_id > newest_tick->tick_id) {
newest_tick = &bt->world;
}
@ -213,7 +213,7 @@ INTERNAL struct interp_ticks pull_ticks(f64 blend_time)
/* Find oldest tick */
struct world *oldest_tick = NULL;
for (struct blend_tick *bt = L.head_blend_tick; bt; bt = bt->next) {
for (struct blend_tick *bt = G.head_blend_tick; bt; bt = bt->next) {
if (!oldest_tick || bt->world.tick_id < oldest_tick->tick_id) {
oldest_tick = &bt->world;
}
@ -222,7 +222,7 @@ INTERNAL struct interp_ticks pull_ticks(f64 blend_time)
/* Find closest ticks to blend time */
struct world *from_tick = oldest_tick;
struct world *to_tick = newest_tick;
for (struct blend_tick *bt = L.head_blend_tick; bt; bt = bt->next) {
for (struct blend_tick *bt = G.head_blend_tick; bt; bt = bt->next) {
f64 bt_time = sys_timestamp_seconds(bt->world.tick_ts);
if (bt_time < blend_time && bt_time > sys_timestamp_seconds(from_tick->tick_ts)) {
@ -240,7 +240,7 @@ INTERNAL struct interp_ticks pull_ticks(f64 blend_time)
struct blend_tick **bts_to_free = arena_dry_push(scratch.arena, struct blend_tick *);
u64 bts_to_free_count = 0;
for (struct blend_tick *bt = L.head_blend_tick; bt; bt = bt->next) {
for (struct blend_tick *bt = G.head_blend_tick; bt; bt = bt->next) {
f64 bt_time = sys_timestamp_seconds(bt->world.tick_ts);
if (bt_time < sys_timestamp_seconds(from_tick->tick_ts)) {
*arena_push(scratch.arena, struct blend_tick *) = bt;
@ -321,16 +321,16 @@ INTERNAL void debug_draw_xform(struct xform xf)
u32 color_x = RGBA_F(1, 0, 0, 0.3);
u32 color_y = RGBA_F(0, 1, 0, 0.3);
struct v2 pos = xform_mul_v2(L.world_view, xf.og);
struct v2 x_ray = xform_basis_mul_v2(L.world_view, xform_get_right(xf));
struct v2 y_ray = xform_basis_mul_v2(L.world_view, xform_get_up(xf));
struct v2 pos = xform_mul_v2(G.world_view, xf.og);
struct v2 x_ray = xform_basis_mul_v2(G.world_view, xform_get_right(xf));
struct v2 y_ray = xform_basis_mul_v2(G.world_view, xform_get_up(xf));
struct quad quad = quad_from_rect(RECT(0, 0, 1, -1));
quad = quad_mul_xform(quad_scale(quad, 0.075), xf);
draw_solid_arrow_ray(L.viewport_canvas, pos, x_ray, thickness, arrowhead_len, color_x);
draw_solid_arrow_ray(L.viewport_canvas, pos, y_ray, thickness, arrowhead_len, color_y);
draw_solid_quad(L.viewport_canvas, quad, color);
draw_solid_arrow_ray(G.viewport_canvas, pos, x_ray, thickness, arrowhead_len, color_x);
draw_solid_arrow_ray(G.viewport_canvas, pos, y_ray, thickness, arrowhead_len, color_y);
draw_solid_quad(G.viewport_canvas, quad, color);
}
/* TODO: remove this (testing) */
@ -342,12 +342,12 @@ INTERNAL void debug_draw_movement(struct entity *ent)
u32 color_vel = RGBA_F(1, 0.5, 0, 1);
u32 color_acc = RGBA_F(1, 1, 0.5, 1);
struct v2 pos = xform_mul_v2(L.world_view, ent->world_xform.og);
struct v2 vel_ray = xform_basis_mul_v2(L.world_view, ent->velocity);
struct v2 acc_ray = xform_basis_mul_v2(L.world_view, ent->acceleration);
struct v2 pos = xform_mul_v2(G.world_view, ent->world_xform.og);
struct v2 vel_ray = xform_basis_mul_v2(G.world_view, ent->velocity);
struct v2 acc_ray = xform_basis_mul_v2(G.world_view, ent->acceleration);
draw_solid_arrow_ray(L.viewport_canvas, pos, vel_ray, thickness, arrow_len, color_vel);
draw_solid_arrow_ray(L.viewport_canvas, pos, acc_ray, thickness, arrow_len, color_acc);
draw_solid_arrow_ray(G.viewport_canvas, pos, vel_ray, thickness, arrow_len, color_vel);
draw_solid_arrow_ray(G.viewport_canvas, pos, acc_ray, thickness, arrow_len, color_acc);
}
INTERNAL void user_update(void)
@ -359,9 +359,9 @@ INTERNAL void user_update(void)
/* Get time */
f64 cur_time = sys_timestamp_seconds(sys_timestamp());
L.dt = max_f64(0.0, cur_time - L.time);
L.time += L.dt;
L.screen_size = sys_window_get_size(L.window);
G.dt = max_f64(0.0, cur_time - G.time);
G.time += G.dt;
G.screen_size = sys_window_get_size(G.window);
/* ========================== *
* Produce interpolated tick
@ -373,7 +373,7 @@ INTERNAL void user_update(void)
#if USER_INTERP_ENABLED
f64 blend_time_offset = (1.0 / GAME_FPS) * USER_INTERP_OFFSET_TICK_RATIO;
f64 blend_time = L.time > blend_time_offset ? L.time - blend_time_offset : 0;
f64 blend_time = G.time > blend_time_offset ? G.time - blend_time_offset : 0;
/* Pull ticks */
struct interp_ticks interp_ticks = pull_ticks(blend_time);
@ -392,15 +392,15 @@ INTERNAL void user_update(void)
tick_blend = clamp_f32(tick_blend, 0.0f, 1.0f);
}
world_copy_replace(&L.world, t1);
world_copy_replace(&G.world, t1);
/* Blend time */
L.world.time = math_lerp_f64(t0->time, t1->time, (f64)tick_blend);
G.world.time = math_lerp_f64(t0->time, t1->time, (f64)tick_blend);
/* Blend entities */
struct entity_array t0_entities = entity_store_as_array(&t0->entity_store);
struct entity_array t1_entities = entity_store_as_array(&t1->entity_store);
struct entity_array world_entities = entity_store_as_array(&L.world.entity_store);
struct entity_array world_entities = entity_store_as_array(&G.world.entity_store);
u64 num_entities = min_u64(t0_entities.count, t1_entities.count);
for (u64 i = 0; i < num_entities; ++i) {
@ -424,12 +424,12 @@ INTERNAL void user_update(void)
}
}
#else
struct interp_ticks interp_ticks = pull_ticks(L.time);
world_copy_replace(&L.world, interp_ticks.to_tick);
tick_is_first_frame = L.world.tick_id == 0;
struct interp_ticks interp_ticks = pull_ticks(G.time);
world_copy_replace(&G.world, interp_ticks.to_tick);
tick_is_first_frame = G.world.tick_id == 0;
#endif
}
struct entity_array entities_array = entity_store_as_array(&L.world.entity_store);
struct entity_array entities_array = entity_store_as_array(&G.world.entity_store);
/* ========================== *
* Find important entities
@ -460,9 +460,9 @@ INTERNAL void user_update(void)
struct sys_event_array events = pop_sys_events(scratch.arena);
/* Reset bind states "was_pressed" */
for (u32 i = 0; i < ARRAY_COUNT(L.bind_states); ++i) {
L.bind_states[i] = (struct bind_state) {
.is_held = L.bind_states[i].is_held
for (u32 i = 0; i < ARRAY_COUNT(G.bind_states); ++i) {
G.bind_states[i] = (struct bind_state) {
.is_held = G.bind_states[i].is_held
};
}
@ -484,7 +484,7 @@ INTERNAL void user_update(void)
/* Update mouse pos */
if (event->kind == SYS_EVENT_KIND_CURSOR_MOVE) {
L.screen_cursor = event->cursor_position;
G.screen_cursor = event->cursor_position;
}
/* Update bind states */
@ -495,17 +495,17 @@ INTERNAL void user_update(void)
if (bind) {
b32 pressed = event->kind == SYS_EVENT_KIND_BUTTON_DOWN;
b32 out_of_bounds = button >= SYS_BTN_M1 && button <= SYS_BTN_M5 &&
(L.viewport_cursor.x < 0 ||
L.viewport_cursor.y < 0 ||
L.viewport_cursor.x > L.viewport_size.x ||
L.viewport_cursor.y > L.viewport_size.y);
L.bind_states[bind].is_held = pressed && !out_of_bounds;
(G.viewport_cursor.x < 0 ||
G.viewport_cursor.y < 0 ||
G.viewport_cursor.x > G.viewport_size.x ||
G.viewport_cursor.y > G.viewport_size.y);
G.bind_states[bind].is_held = pressed && !out_of_bounds;
if (pressed) {
if (!out_of_bounds) {
++L.bind_states[bind].num_presses;
++G.bind_states[bind].num_presses;
}
} else {
++L.bind_states[bind].num_releases;
++G.bind_states[bind].num_releases;
}
}
}
@ -532,14 +532,14 @@ INTERNAL void user_update(void)
}
}
/* Paste test */
if (event->kind == SYS_EVENT_KIND_BUTTON_DOWN && event->button == SYS_BTN_V && L.bind_states[USER_BIND_KIND_CTRL_TEST].is_held) {
if (event->kind == SYS_EVENT_KIND_BUTTON_DOWN && event->button == SYS_BTN_V && G.bind_states[USER_BIND_KIND_CTRL_TEST].is_held) {
struct string clipboard = sys_get_clipboard_text(scratch.arena);
u64 copy_size = min_u64(clipboard.len, ARRAY_COUNT(test_input_array) - test_input_array_pos - 4);
MEMCPY(&test_input_array[test_input_array_pos], clipboard.text, copy_size);
test_input_array_pos += copy_size;
}
/* Copy test */
if (event->kind == SYS_EVENT_KIND_BUTTON_DOWN && event->button == SYS_BTN_C && L.bind_states[USER_BIND_KIND_CTRL_TEST].is_held) {
if (event->kind == SYS_EVENT_KIND_BUTTON_DOWN && event->button == SYS_BTN_C && G.bind_states[USER_BIND_KIND_CTRL_TEST].is_held) {
struct string src = { .text = test_input_array, .len = test_input_array_pos };
sys_set_clipboard_text(src);
}
@ -551,17 +551,17 @@ INTERNAL void user_update(void)
/* Test fullscreen */
{
struct bind_state state = L.bind_states[USER_BIND_KIND_FULLSCREEN];
struct bind_state state = G.bind_states[USER_BIND_KIND_FULLSCREEN];
if (state.num_presses) {
struct sys_window_settings settings = sys_window_get_settings(L.window);
struct sys_window_settings settings = sys_window_get_settings(G.window);
settings.flags ^= SYS_WINDOW_SETTINGS_FLAG_FULLSCREEN;
sys_window_update_settings(L.window, &settings);
sys_window_update_settings(G.window, &settings);
}
}
/* Test clear world */
{
struct bind_state state = L.bind_states[USER_BIND_KIND_DEBUG_CLEAR];
struct bind_state state = G.bind_states[USER_BIND_KIND_DEBUG_CLEAR];
if (state.num_presses || state.is_held) {
queue_game_cmd(&cmd_list, (struct game_cmd) {
.kind = GAME_CMD_KIND_CLEAR_ALL
@ -569,12 +569,12 @@ INTERNAL void user_update(void)
}
}
if (L.bind_states[USER_BIND_KIND_DEBUG_DRAW].num_presses > 0) {
L.debug_draw = !L.debug_draw;
if (G.bind_states[USER_BIND_KIND_DEBUG_DRAW].num_presses > 0) {
G.debug_draw = !G.debug_draw;
}
if (L.bind_states[USER_BIND_KIND_DEBUG_CAMERA].num_presses > 0) {
L.debug_camera = !L.debug_camera;
if (G.bind_states[USER_BIND_KIND_DEBUG_CAMERA].num_presses > 0) {
G.debug_camera = !G.debug_camera;
}
@ -583,9 +583,9 @@ INTERNAL void user_update(void)
* ========================== */
/* Calculate screen viewport dimensions */
if (L.debug_camera) {
L.viewport_size = L.screen_size;
L.viewport_screen_offset = V2(0, 0);
if (G.debug_camera) {
G.viewport_size = G.screen_size;
G.viewport_screen_offset = V2(0, 0);
} else {
/* Determine viewport size by camera & window dimensions */
@ -597,63 +597,63 @@ INTERNAL void user_update(void)
aspect_ratio = camera_size.x / camera_size.y;
}
}
f32 width = L.screen_size.x;
f32 height = L.screen_size.y;
f32 width = G.screen_size.x;
f32 height = G.screen_size.y;
if (width / height > aspect_ratio) {
width = height * aspect_ratio;
} else {
height = (f32)math_ceil(width / aspect_ratio);
}
L.viewport_size = V2(width, height);
G.viewport_size = V2(width, height);
/* Center viewport in window */
f32 x = 0;
f32 y = 0;
x = math_round(L.screen_size.x / 2 - width / 2);
y = math_round(L.screen_size.y / 2 - height / 2);
L.viewport_screen_offset = V2(x, y);
x = math_round(G.screen_size.x / 2 - width / 2);
y = math_round(G.screen_size.y / 2 - height / 2);
G.viewport_screen_offset = V2(x, y);
}
L.viewport_center = v2_mul(L.viewport_size, 0.5);
L.viewport_cursor = v2_sub(L.screen_cursor, L.viewport_screen_offset);
G.viewport_center = v2_mul(G.viewport_size, 0.5);
G.viewport_cursor = v2_sub(G.screen_cursor, G.viewport_screen_offset);
/* ========================== *
* Update view
* ========================== */
if (L.debug_camera) {
L.world_view = xform_with_rotation(L.world_view, 0);
if (G.debug_camera) {
G.world_view = xform_with_rotation(G.world_view, 0);
/* Pan view */
if (L.bind_states[USER_BIND_KIND_PAN].is_held) {
if (!L.debug_camera_panning) {
L.debug_camera_pan_start = xform_invert_mul_v2(L.world_view, L.viewport_cursor);
if (G.bind_states[USER_BIND_KIND_PAN].is_held) {
if (!G.debug_camera_panning) {
G.debug_camera_pan_start = xform_invert_mul_v2(G.world_view, G.viewport_cursor);
}
L.debug_camera_panning = true;
struct v2 offset = v2_sub(L.debug_camera_pan_start, xform_invert_mul_v2(L.world_view, L.viewport_cursor));
L.world_view = xform_translate(L.world_view, v2_neg(offset));
L.debug_camera_pan_start = xform_invert_mul_v2(L.world_view, L.viewport_cursor);
G.debug_camera_panning = true;
struct v2 offset = v2_sub(G.debug_camera_pan_start, xform_invert_mul_v2(G.world_view, G.viewport_cursor));
G.world_view = xform_translate(G.world_view, v2_neg(offset));
G.debug_camera_pan_start = xform_invert_mul_v2(G.world_view, G.viewport_cursor);
} else {
L.debug_camera_panning = false;
G.debug_camera_panning = false;
}
/* Zoom view */
i32 input_zooms = L.bind_states[USER_BIND_KIND_ZOOM_IN].num_presses - L.bind_states[USER_BIND_KIND_ZOOM_OUT].num_presses;
i32 input_zooms = G.bind_states[USER_BIND_KIND_ZOOM_IN].num_presses - G.bind_states[USER_BIND_KIND_ZOOM_OUT].num_presses;
if (input_zooms != 0) {
/* Zoom to cursor */
f32 zoom_rate = 2;
f32 zoom = math_pow(zoom_rate, input_zooms);
struct v2 world_cursor = xform_invert_mul_v2(L.world_view, L.viewport_cursor);
L.world_view = xform_translate(L.world_view, world_cursor);
L.world_view = xform_scale(L.world_view, V2(zoom, zoom));
L.world_view = xform_translate(L.world_view, v2_neg(world_cursor));
struct v2 world_cursor = xform_invert_mul_v2(G.world_view, G.viewport_cursor);
G.world_view = xform_translate(G.world_view, world_cursor);
G.world_view = xform_scale(G.world_view, V2(zoom, zoom));
G.world_view = xform_translate(G.world_view, v2_neg(world_cursor));
}
} else {
struct v2 center = active_camera->world_xform.og;
f32 rot = xform_get_rotation(active_camera->world_xform);
/* Scale view into viewport based on camera size */
struct v2 size = L.viewport_size;
struct v2 size = G.viewport_size;
{
struct xform quad_xf = xform_mul(active_camera->world_xform, active_camera->camera_quad_xform);
struct v2 camera_size = xform_get_scale(quad_xf);
@ -664,17 +664,17 @@ INTERNAL void user_update(void)
f32 scale = min_f32(size.x, size.y);
struct trs trs = TRS(
.t = v2_sub(L.viewport_center, center),
.t = v2_sub(G.viewport_center, center),
.r = rot,
.s = V2(scale, scale)
);
struct v2 pivot = center;
L.world_view = XFORM_IDENT;
L.world_view = xform_translate(L.world_view, pivot);
L.world_view = xform_trs_pivot_rs(L.world_view, trs, pivot);
G.world_view = XFORM_IDENT;
G.world_view = xform_translate(G.world_view, pivot);
G.world_view = xform_trs_pivot_rs(G.world_view, trs, pivot);
}
L.world_cursor = xform_invert_mul_v2(L.world_view, L.viewport_cursor);
G.world_cursor = xform_invert_mul_v2(G.world_view, G.viewport_cursor);
/* ========================== *
* Update listener
@ -682,8 +682,8 @@ INTERNAL void user_update(void)
{
struct v2 up = V2(0, -1);
struct v2 listener_pos = xform_invert_mul_v2(L.world_view, L.viewport_center);
struct v2 listener_dir = v2_norm(xform_basis_invert_mul_v2(L.world_view, up));
struct v2 listener_pos = xform_invert_mul_v2(G.world_view, G.viewport_center);
struct v2 listener_dir = v2_norm(xform_basis_invert_mul_v2(G.world_view, up));
mixer_set_listener(listener_pos, listener_dir);
}
@ -693,7 +693,7 @@ INTERNAL void user_update(void)
{
u32 color = RGBA_F(0.2f, 0.2f, 0.2f, 1.f);
draw_solid_rect(L.viewport_bg_canvas, RECT(0, 0, L.viewport_size.x, L.viewport_size.y), color);
draw_solid_rect(G.viewport_bg_canvas, RECT(0, 0, G.viewport_size.x, G.viewport_size.y), color);
}
/* ========================== *
@ -712,25 +712,25 @@ INTERNAL void user_update(void)
i64 cols = 20;
/* Draw column lines */
struct v2 col_ray = xform_basis_mul_v2(L.world_view, V2(0, rows));
struct v2 col_ray = xform_basis_mul_v2(G.world_view, V2(0, rows));
for (i64 col = starty; col <= (starty + cols); ++col) {
u32 line_color = color;
if (col == 0) {
line_color = y_color;
}
struct v2 pos = xform_mul_v2(L.world_view, V2(col, starty));
draw_solid_ray(L.viewport_bg_canvas, pos, col_ray, thickness, line_color);
struct v2 pos = xform_mul_v2(G.world_view, V2(col, starty));
draw_solid_ray(G.viewport_bg_canvas, pos, col_ray, thickness, line_color);
}
struct v2 row_ray = xform_basis_mul_v2(L.world_view, V2(cols, 0));
struct v2 row_ray = xform_basis_mul_v2(G.world_view, V2(cols, 0));
for (i64 row = startx; row <= (startx + rows); ++row) {
u32 line_color = color;
if (row == 0) {
line_color = x_color;
}
struct v2 pos = xform_mul_v2(L.world_view, V2(startx, row));
draw_solid_ray(L.viewport_bg_canvas, pos, row_ray, thickness, line_color);
struct v2 pos = xform_mul_v2(G.world_view, V2(startx, row));
draw_solid_ray(G.viewport_bg_canvas, pos, row_ray, thickness, line_color);
}
}
@ -748,7 +748,7 @@ INTERNAL void user_update(void)
struct entity *ent = &entities_array.entities[entity_index];
if (!ent->valid) continue;
b32 skip_debug_draw = !L.debug_camera && ent == active_camera;
b32 skip_debug_draw = !G.debug_camera && ent == active_camera;
b32 skip_debug_draw_transform = ent == active_camera;
/* Draw sprite */
@ -787,15 +787,15 @@ INTERNAL void user_update(void)
params.clip = frame.clip;
}
draw_texture_quad(L.world_canvas, params, quad);
draw_texture_quad(G.world_canvas, params, quad);
#if 0
if (L.debug_draw && !skip_debug_draw) {
if (G.debug_draw && !skip_debug_draw) {
/* Debug draw sprite quad */
{
f32 thickness = 2.f;
u32 color = RGBA_F(1, 1, 0, 0.25);
draw_solid_quad_line(L.world_canvas, quad, (thickness / PIXELS_PER_UNIT / L.world_view.zoom), color);
draw_solid_quad_line(G.world_canvas, quad, (thickness / PIXELS_PER_UNIT / G.world_view.zoom), color);
}
/* Debug draw sprite transform */
@ -806,7 +806,7 @@ INTERNAL void user_update(void)
/* Debug draw sprite pivot */
{
u32 color = RGBA_F(1, 0, 0, 1);
draw_solid_circle(L.world_canvas, ent->world_xform.og, 0.02, color, 20);
draw_solid_circle(G.world_canvas, ent->world_xform.og, 0.02, color, 20);
}
}
#endif
@ -814,7 +814,7 @@ INTERNAL void user_update(void)
}
/* Debug draw info */
if (L.debug_draw && !skip_debug_draw) {
if (G.debug_draw && !skip_debug_draw) {
struct temp_arena temp = arena_temp_begin(scratch.arena);
#if 0
@ -827,7 +827,7 @@ INTERNAL void user_update(void)
f32 offset = 1;
struct v2 pos = v2_add(xf.og, v2_mul(V2(0, -1), offset));
pos = xform_mul_v2(L.world_view, pos);
pos = xform_mul_v2(G.world_view, pos);
pos = v2_round(pos);
struct string disp_name = ent->sprite_name;
@ -850,7 +850,7 @@ INTERNAL void user_update(void)
);
draw_text(L.viewport_canvas, disp_font, pos, text);
draw_text(G.viewport_canvas, disp_font, pos, text);
}
#endif
@ -861,15 +861,15 @@ INTERNAL void user_update(void)
}
/* Draw hierarchy */
struct entity *parent = entity_from_handle(&L.world.entity_store, ent->parent);
struct entity *parent = entity_from_handle(&G.world.entity_store, ent->parent);
if (parent->valid) {
u32 color = RGBA_F(0.6, 0.6, 1, 0.75);
f32 thickness = 5;
f32 arrow_height = 15;
struct v2 start = xform_mul_v2(L.world_view, ent->world_xform.og);
struct v2 end = xform_mul_v2(L.world_view, parent->world_xform.og);
draw_solid_arrow_line(L.viewport_canvas, start, end, thickness, arrow_height, color);
struct v2 start = xform_mul_v2(G.world_view, ent->world_xform.og);
struct v2 end = xform_mul_v2(G.world_view, parent->world_xform.og);
draw_solid_arrow_line(G.viewport_canvas, start, end, thickness, arrow_height, color);
}
/* Draw aim */
@ -877,9 +877,9 @@ INTERNAL void user_update(void)
u32 color = RGBA_F(0.75, 0, 0.75, 0.5);
f32 thickness = 3;
f32 arrow_height = 10;
struct v2 pos = xform_mul_v2(L.world_view, ent->world_xform.og);
struct v2 aim_ray = xform_basis_mul_v2(L.world_view, ent->player_aim);
draw_solid_arrow_ray(L.viewport_canvas, pos, aim_ray, thickness, arrow_height, color);
struct v2 pos = xform_mul_v2(G.world_view, ent->world_xform.og);
struct v2 aim_ray = xform_basis_mul_v2(G.world_view, ent->player_aim);
draw_solid_arrow_ray(G.viewport_canvas, pos, aim_ray, thickness, arrow_height, color);
}
/* Draw camera rect */
@ -890,9 +890,9 @@ INTERNAL void user_update(void)
struct xform quad_xf = xform_mul(ent->world_xform, ent->camera_quad_xform);
struct quad quad = quad_mul_xform(QUAD_UNIT_SQUARE_CENTERED, quad_xf);
quad = quad_mul_xform(quad, L.world_view);
quad = quad_mul_xform(quad, G.world_view);
draw_solid_quad_line(L.viewport_canvas, quad, thickness, color);
draw_solid_quad_line(G.viewport_canvas, quad, thickness, color);
}
arena_temp_end(temp);
@ -900,8 +900,8 @@ INTERNAL void user_update(void)
}
/* Draw crosshair or show cursor */
if (!L.debug_camera) {
struct v2 crosshair_pos = L.viewport_cursor;
if (!G.debug_camera) {
struct v2 crosshair_pos = G.viewport_cursor;
u32 tint = RGBA_F(1, 1, 1, 1);
struct v2 size = V2(0, 0);
@ -910,18 +910,18 @@ INTERNAL void user_update(void)
size = t->size;
struct xform xf = XFORM_TRS(.t = crosshair_pos, .s = size);
struct quad quad = quad_mul_xform(QUAD_UNIT_SQUARE_CENTERED, xf);
draw_texture_quad(L.viewport_canvas, DRAW_TEXTURE_PARAMS(.texture = t, .tint = tint), quad);
draw_texture_quad(G.viewport_canvas, DRAW_TEXTURE_PARAMS(.texture = t, .tint = tint), quad);
}
struct rect cursor_clip = RECT_FROM_V2(L.viewport_screen_offset, L.viewport_size);
struct rect cursor_clip = RECT_FROM_V2(G.viewport_screen_offset, G.viewport_size);
cursor_clip.pos = v2_add(cursor_clip.pos, v2_mul(size, 0.5f));
cursor_clip.pos = v2_add(cursor_clip.pos, V2(1, 1));
cursor_clip.size = v2_sub(cursor_clip.size, size);
sys_window_cursor_hide(L.window);
sys_window_cursor_enable_clip(L.window, cursor_clip);
sys_window_cursor_hide(G.window);
sys_window_cursor_enable_clip(G.window, cursor_clip);
} else {
sys_window_cursor_disable_clip(L.window);
sys_window_cursor_show(L.window);
sys_window_cursor_disable_clip(G.window);
sys_window_cursor_show(G.window);
}
/* ========================== *
@ -931,8 +931,8 @@ INTERNAL void user_update(void)
/* Movement */
struct v2 input_move_dir = { 0 };
{
for (enum user_bind_kind bind = 0; bind < (i32)ARRAY_COUNT(L.bind_states); ++bind) {
struct bind_state state = L.bind_states[bind];
for (enum user_bind_kind bind = 0; bind < (i32)ARRAY_COUNT(G.bind_states); ++bind) {
struct bind_state state = G.bind_states[bind];
if (!state.is_held && state.num_presses <= 0) {
continue;
@ -960,14 +960,14 @@ INTERNAL void user_update(void)
}
}
input_move_dir = xform_basis_invert_mul_v2(L.world_view, input_move_dir); /* Make move dir relative to world view */
input_move_dir = xform_basis_invert_mul_v2(G.world_view, input_move_dir); /* Make move dir relative to world view */
input_move_dir = v2_norm(input_move_dir);
}
/* Aim */
struct v2 input_aim = player->player_aim;
if (!L.debug_camera) {
input_aim = v2_sub(L.world_cursor, player->world_xform.og);
if (!G.debug_camera) {
input_aim = v2_sub(G.world_cursor, player->world_xform.og);
}
/* Queue cmd */
@ -981,47 +981,47 @@ INTERNAL void user_update(void)
/* ---------------------------------------------------------------------- */
/* Debug draw info */
if (L.debug_draw) {
if (G.debug_draw) {
struct temp_arena temp = arena_temp_begin(scratch.arena);
f32 spacing = 20;
struct v2 pos = V2(10, 8);
struct font *font = font_load(STR("res/fonts/fixedsys.ttf"), 12.0f);
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("time: %F"), FMT_FLOAT((f64)L.time)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("time: %F"), FMT_FLOAT((f64)G.time)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("screen_size: (%F, %F)"), FMT_FLOAT((f64)L.screen_size.x), FMT_FLOAT((f64)L.screen_size.y)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("screen_size: (%F, %F)"), FMT_FLOAT((f64)G.screen_size.x), FMT_FLOAT((f64)G.screen_size.y)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("screen_cursor: (%F, %F)"), FMT_FLOAT((f64)L.screen_cursor.x), FMT_FLOAT((f64)L.screen_cursor.y)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("screen_cursor: (%F, %F)"), FMT_FLOAT((f64)G.screen_cursor.x), FMT_FLOAT((f64)G.screen_cursor.y)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("viewport_screen_offset: (%F, %F)"), FMT_FLOAT((f64)L.viewport_screen_offset.x), FMT_FLOAT((f64)L.viewport_screen_offset.y)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("viewport_screen_offset: (%F, %F)"), FMT_FLOAT((f64)G.viewport_screen_offset.x), FMT_FLOAT((f64)G.viewport_screen_offset.y)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("viewport_size: (%F, %F)"), FMT_FLOAT((f64)L.viewport_size.x), FMT_FLOAT((f64)L.viewport_size.y)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("viewport_size: (%F, %F)"), FMT_FLOAT((f64)G.viewport_size.x), FMT_FLOAT((f64)G.viewport_size.y)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("viewport_center: (%F, %F)"), FMT_FLOAT((f64)L.viewport_center.x), FMT_FLOAT((f64)L.viewport_center.y)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("viewport_center: (%F, %F)"), FMT_FLOAT((f64)G.viewport_center.x), FMT_FLOAT((f64)G.viewport_center.y)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("viewport_cursor: (%F, %F)"), FMT_FLOAT((f64)L.viewport_cursor.x), FMT_FLOAT((f64)L.viewport_cursor.y)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("viewport_cursor: (%F, %F)"), FMT_FLOAT((f64)G.viewport_cursor.x), FMT_FLOAT((f64)G.viewport_cursor.y)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("world_view.og: (%F, %F)"), FMT_FLOAT((f64)L.world_view.og.x), FMT_FLOAT((f64)L.world_view.og.y)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("world_view.og: (%F, %F)"), FMT_FLOAT((f64)G.world_view.og.x), FMT_FLOAT((f64)G.world_view.og.y)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("world_view rotation: %F"), FMT_FLOAT((f64)xform_get_rotation(L.world_view))));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("world_view rotation: %F"), FMT_FLOAT((f64)xform_get_rotation(G.world_view))));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("world_view scale: (%F, %F)"), FMT_FLOAT((f64)xform_get_scale(L.world_view).x), FMT_FLOAT((f64)xform_get_scale(L.world_view).x)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("world_view scale: (%F, %F)"), FMT_FLOAT((f64)xform_get_scale(G.world_view).x), FMT_FLOAT((f64)xform_get_scale(G.world_view).x)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("world_cursor: (%F, %F)"), FMT_FLOAT((f64)L.world_cursor.x), FMT_FLOAT((f64)L.world_cursor.y)));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("world_cursor: (%F, %F)"), FMT_FLOAT((f64)G.world_cursor.x), FMT_FLOAT((f64)G.world_cursor.y)));
pos.y += spacing;
draw_text(L.viewport_canvas, font, pos, string_format(temp.arena, STR("debug_camera: %F"), FMT_STR(L.debug_camera ? STR("true") : STR("false"))));
draw_text(G.viewport_canvas, font, pos, string_format(temp.arena, STR("debug_camera: %F"), FMT_STR(G.debug_camera ? STR("true") : STR("false"))));
pos.y += spacing;
arena_temp_end(temp);
@ -1038,7 +1038,7 @@ INTERNAL void user_update(void)
.size = font->texture.size
};
struct draw_texture_params dparams = DRAW_TEXTURE_PARAMS(.texture = &font->texture);
draw_texture_rect(L.viewport_canvas, dparams, atlas_rect);
draw_texture_rect(G.viewport_canvas, dparams, atlas_rect);
#endif
@ -1074,9 +1074,9 @@ INTERNAL void user_update(void)
}
}
//struct v2 pos = v2_round(V2(0, L.viewport_size.y / 2));
//struct v2 pos = v2_round(V2(0, G.viewport_size.y / 2));
struct v2 pos = v2_round(V2(0, 0));
draw_text(L.viewport_canvas, font, pos, draw_str);
draw_text(G.viewport_canvas, font, pos, draw_str);
}
/* Push game cmds */
@ -1087,14 +1087,14 @@ INTERNAL void user_update(void)
* ========================== */
/* Send canvases to GPU */
renderer_canvas_send_to_gpu(L.viewport_bg_canvas);
renderer_canvas_send_to_gpu(L.world_canvas);
renderer_canvas_send_to_gpu(L.viewport_canvas);
renderer_canvas_send_to_gpu(G.viewport_bg_canvas);
renderer_canvas_send_to_gpu(G.world_canvas);
renderer_canvas_send_to_gpu(G.viewport_canvas);
/* Set canvas views before presenting */
renderer_canvas_set_view(L.viewport_bg_canvas, XFORM_IDENT);
renderer_canvas_set_view(L.world_canvas, L.world_view);
renderer_canvas_set_view(L.viewport_canvas, XFORM_IDENT);
renderer_canvas_set_view(G.viewport_bg_canvas, XFORM_IDENT);
renderer_canvas_set_view(G.world_canvas, G.world_view);
renderer_canvas_set_view(G.viewport_canvas, XFORM_IDENT);
/* Present */
i32 vsync = VSYNC_ENABLED;
@ -1103,22 +1103,22 @@ INTERNAL void user_update(void)
u64 canvases_count = 0;
{
/* Viewport background canvas */
*arena_push(scratch.arena, struct renderer_canvas *) = L.viewport_bg_canvas;
*arena_push(scratch.arena, struct renderer_canvas *) = G.viewport_bg_canvas;
++canvases_count;
/* World canvas */
if (!tick_is_first_frame) {
/* Only render world if not on first frame */
*arena_push(scratch.arena, struct renderer_canvas *) = L.world_canvas;
*arena_push(scratch.arena, struct renderer_canvas *) = G.world_canvas;
++canvases_count;
}
/* Viewport canvas */
*arena_push(scratch.arena, struct renderer_canvas *) = L.viewport_canvas;
*arena_push(scratch.arena, struct renderer_canvas *) = G.viewport_canvas;
++canvases_count;
}
renderer_canvas_present(canvases, canvases_count, L.screen_size, RECT_FROM_V2(L.viewport_screen_offset, L.viewport_size), vsync);
renderer_canvas_present(canvases, canvases_count, G.screen_size, RECT_FROM_V2(G.viewport_screen_offset, G.viewport_size), vsync);
scratch_end(scratch);
}
@ -1134,7 +1134,7 @@ INTERNAL SYS_THREAD_FUNC_DEF(user_thread_entry_point, arg)
sys_timestamp_t last_frame_ts = 0;
f64 target_dt = USER_FRAME_LIMIT > (0) ? (1.0 / USER_FRAME_LIMIT) : 0;
while (!L.shutdown) {
while (!G.shutdown) {
__profscope(user_update_w_sleep);
sleep_frame(last_frame_ts, target_dt);
last_frame_ts = sys_timestamp();
@ -1161,29 +1161,29 @@ struct user_startup_receipt user_startup(struct work_startup_receipt *work_sr,
(UNUSED)asset_cache_sr;
(UNUSED)mixer_sr;
L.arena = arena_alloc(GIGABYTE(64));
G.arena = arena_alloc(GIGABYTE(64));
L.sys_events_mutex = sys_mutex_alloc();
L.sys_events_arena = arena_alloc(GIGABYTE(64));
G.sys_events_mutex = sys_mutex_alloc();
G.sys_events_arena = arena_alloc(GIGABYTE(64));
world_alloc(&L.world);
world_alloc(&G.world);
L.world_canvas = renderer_canvas_alloc();
L.world_view = XFORM_TRS(.t = V2(0, 0), .r = 0, .s = V2(PIXELS_PER_UNIT, PIXELS_PER_UNIT));
G.world_canvas = renderer_canvas_alloc();
G.world_view = XFORM_TRS(.t = V2(0, 0), .r = 0, .s = V2(PIXELS_PER_UNIT, PIXELS_PER_UNIT));
L.viewport_bg_canvas = renderer_canvas_alloc();
L.viewport_canvas = renderer_canvas_alloc();
G.viewport_bg_canvas = renderer_canvas_alloc();
G.viewport_canvas = renderer_canvas_alloc();
L.window = window;
sys_window_register_event_callback(L.window, &window_event_callback);
G.window = window;
sys_window_register_event_callback(G.window, &window_event_callback);
L.user_thread = sys_thread_init(&user_thread_entry_point, NULL, STR("[P1] User thread"));
G.user_thread = sys_thread_init(&user_thread_entry_point, NULL, STR("[P1] User thread"));
return (struct user_startup_receipt) { 0 };
}
void user_shutdown(void)
{
L.shutdown = true;
sys_thread_join(&L.user_thread);
G.shutdown = true;
sys_thread_join(&G.user_thread);
}

View File

@ -23,7 +23,7 @@
*/
/* NOTE:
* Functions suffixed with "assume_locked" require `L.mutex` to be
* Functions suffixed with "assume_locked" require `G.mutex` to be
* locked & unlocked by the caller.
*/
@ -86,7 +86,7 @@ GLOBAL struct {
/* Pointers to the last piece of work of each priority in the scheduled
* work list (used for O(1) insertion) */
struct work *scheduled_work_priority_tails[NUM_WORK_PRIORITIES];
} L = { 0 }, DEBUG_ALIAS(L, L_work);
} G = { 0 }, DEBUG_ALIAS(G, G_work);
/* ========================== *
* Thread local state
@ -112,15 +112,15 @@ struct work_startup_receipt work_startup(u32 num_worker_threads)
sys_panic(STR("Tried to start up worker pool with 0 threads"));
}
L.arena = arena_alloc(GIGABYTE(64));
L.mutex = sys_mutex_alloc();
L.semaphore = sys_semaphore_alloc(num_worker_threads);
L.worker_count = num_worker_threads;
L.idle_worker_count = num_worker_threads;
G.arena = arena_alloc(GIGABYTE(64));
G.mutex = sys_mutex_alloc();
G.semaphore = sys_semaphore_alloc(num_worker_threads);
G.worker_count = num_worker_threads;
G.idle_worker_count = num_worker_threads;
/* Initialize threads */
{
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
struct worker *prev = NULL;
for (u32 i = 0; i < num_worker_threads; ++i) {
@ -128,17 +128,17 @@ struct work_startup_receipt work_startup(u32 num_worker_threads)
STR("[P0] Worker %F"),
FMT_UINT(i));
struct worker *worker = arena_push_zero(&L.arena, struct worker);
struct worker *worker = arena_push_zero(&G.arena, struct worker);
worker->thread = sys_thread_init(&worker_thread_entry_point, NULL, thread_name);
if (prev) {
prev->next = worker;
} else {
L.worker_head = worker;
G.worker_head = worker;
}
prev = worker;
}
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}
scratch_end(scratch);
@ -148,10 +148,10 @@ struct work_startup_receipt work_startup(u32 num_worker_threads)
void work_shutdown(void)
{
L.shutdown = true;
G.shutdown = true;
WRITE_BARRIER();
sys_semaphore_signal(&L.semaphore, L.worker_count);
for (struct worker *worker = L.worker_head; (worker = worker->next);) {
sys_semaphore_signal(&G.semaphore, G.worker_count);
for (struct worker *worker = G.worker_head; (worker = worker->next);) {
sys_thread_join(&worker->thread);
}
}
@ -163,21 +163,21 @@ void work_shutdown(void)
INTERNAL struct work *work_alloc_assume_locked(void)
{
__prof;
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
struct work *work = NULL;
/* Allocate work */
if (L.free_work_head) {
if (G.free_work_head) {
/* Reuse from free list */
work = L.free_work_head;
L.free_work_head = work->next_free;
work = G.free_work_head;
G.free_work_head = work->next_free;
*work = (struct work) {
.condition_variable_finished = work->condition_variable_finished,
.gen = work->gen
};
} else {
/* Make new */
work = arena_push(&L.arena, struct work);
work = arena_push(&G.arena, struct work);
*work = (struct work) {
.condition_variable_finished = sys_condition_variable_alloc(),
.gen = 1
@ -188,15 +188,15 @@ INTERNAL struct work *work_alloc_assume_locked(void)
INTERNAL void work_release_assume_locked(struct work *work)
{
sys_mutex_assert_locked(&L.mutex);
work->next_free = L.free_work_head;
L.free_work_head = work;
sys_mutex_assert_locked(&G.mutex);
work->next_free = G.free_work_head;
G.free_work_head = work;
++work->gen;
}
INTERNAL struct work_handle work_to_handle_assume_locked(struct work *work)
{
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
return (struct work_handle) {
.work = work,
.gen = work->gen
@ -205,18 +205,18 @@ INTERNAL struct work_handle work_to_handle_assume_locked(struct work *work)
INTERNAL struct work_task *task_alloc_assume_locked(void)
{
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
struct work_task *task = NULL;
/* Allocate task */
if (L.free_task_head) {
if (G.free_task_head) {
/* Reuse from free list */
task = L.free_task_head;
L.free_task_head = task->next_free;
task = G.free_task_head;
G.free_task_head = task->next_free;
*task = (struct work_task) { 0 };
} else {
/* Make new */
task = arena_push_zero(&L.arena, struct work_task);
task = arena_push_zero(&G.arena, struct work_task);
}
return task;
@ -224,9 +224,9 @@ INTERNAL struct work_task *task_alloc_assume_locked(void)
INTERNAL void task_release_assume_locked(struct work_task *task)
{
sys_mutex_assert_locked(&L.mutex);
task->next_free = L.free_task_head;
L.free_task_head = task;
sys_mutex_assert_locked(&G.mutex);
task->next_free = G.free_task_head;
G.free_task_head = task;
}
/* ========================== *
@ -236,22 +236,22 @@ INTERNAL void task_release_assume_locked(struct work_task *task)
INTERNAL void work_schedule_assume_locked(struct work *work)
{
__prof;
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
enum work_priority priority = work->priority;
if (L.scheduled_work_head) {
struct work *head = L.scheduled_work_head;
if (G.scheduled_work_head) {
struct work *head = G.scheduled_work_head;
if (head->priority >= priority) {
/* Head is lower priority, insert work as new head */
L.scheduled_work_head = work;
G.scheduled_work_head = work;
work->next_scheduled = head;
head->prev_scheduled = work;
} else {
/* Find higher priority */
struct work *tail = NULL;
for (i32 i = priority; i >= 0; --i) {
tail = L.scheduled_work_priority_tails[i];
tail = G.scheduled_work_priority_tails[i];
if (tail) {
break;
}
@ -262,28 +262,28 @@ INTERNAL void work_schedule_assume_locked(struct work *work)
tail->next_scheduled = work;
}
} else {
L.scheduled_work_head = work;
G.scheduled_work_head = work;
}
L.scheduled_work_priority_tails[priority] = work;
G.scheduled_work_priority_tails[priority] = work;
WRITE_BARRIER();
sys_semaphore_signal(&L.semaphore, min_u32(work->tasks_incomplete, L.worker_count));
sys_semaphore_signal(&G.semaphore, min_u32(work->tasks_incomplete, G.worker_count));
}
INTERNAL void work_unschedule_assume_locked(struct work *work)
{
__prof;
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
struct work *prev = (struct work *)work->prev_scheduled;
struct work *next = (struct work *)work->next_scheduled;
/* Remove from priority tails array */
enum work_priority priority = work->priority;
struct work *priority_tail = L.scheduled_work_priority_tails[priority];
struct work *priority_tail = G.scheduled_work_priority_tails[priority];
if (priority_tail == work && (!prev || prev->priority == priority)) {
L.scheduled_work_priority_tails[priority] = prev;
G.scheduled_work_priority_tails[priority] = prev;
}
/* Unhook work */
@ -293,8 +293,8 @@ INTERNAL void work_unschedule_assume_locked(struct work *work)
if (next) {
next->prev_scheduled = prev;
}
if (work == L.scheduled_work_head) {
L.scheduled_work_head = next;
if (work == G.scheduled_work_head) {
G.scheduled_work_head = next;
}
}
@ -305,7 +305,7 @@ INTERNAL void work_unschedule_assume_locked(struct work *work)
INTERNAL struct work_task *work_dequeue_task_assume_locked(struct work *work)
{
__prof;
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
struct work_task *task = work->task_head;
if (task) {
work->task_head = task->next_in_work;
@ -326,7 +326,7 @@ INTERNAL struct work_task *work_dequeue_task_assume_locked(struct work *work)
INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct work *work)
{
__prof;
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
struct work_task *task = work_dequeue_task_assume_locked(work);
b32 more_tasks = work->task_head != NULL;
@ -336,11 +336,11 @@ INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct work *work
++work->workers;
/* Do task (temporarily unlock) */
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
{
task->func(task->data);
}
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
--work->workers;
--work->tasks_incomplete;
task_release_assume_locked(task);
@ -362,7 +362,7 @@ INTERNAL b32 work_exec_single_task_maybe_release_assume_locked(struct work *work
INTERNAL void work_exec_remaining_tasks_maybe_release_assume_locked(struct work *work)
{
__prof;
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
b32 more_tasks = true;
while (more_tasks) {
@ -385,24 +385,24 @@ INTERNAL void worker_thread_entry_point(void *thread_data)
};
while (true) {
sys_semaphore_wait(&L.semaphore);
if (L.shutdown) {
sys_semaphore_wait(&G.semaphore);
if (G.shutdown) {
/* Exit thread */
break;
}
while (L.scheduled_work_head) {
while (G.scheduled_work_head) {
/* Do work from top */
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
struct work *work = L.scheduled_work_head;
struct work *work = G.scheduled_work_head;
if (work) {
__profscope(work_pool_task);
--L.idle_worker_count;
--G.idle_worker_count;
work_exec_single_task_maybe_release_assume_locked((struct work *)work);
++L.idle_worker_count;
++G.idle_worker_count;
}
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}
}
}
@ -415,7 +415,7 @@ INTERNAL void worker_thread_entry_point(void *thread_data)
INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate *ws, b32 help, enum work_priority priority)
{
__prof;
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
struct work *work = work_alloc_assume_locked();
struct work_handle wh = work_to_handle_assume_locked(work);
@ -448,7 +448,7 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate
struct worker_ctx *ctx = thread_local_eval(&tl_worker_ctx);
if (ctx->is_worker) {
b32 more_tasks = true;
while (L.idle_worker_count == 0 && work->workers == 0 && more_tasks) {
while (G.idle_worker_count == 0 && work->workers == 0 && more_tasks) {
more_tasks = work_exec_single_task_maybe_release_assume_locked(work);
}
}
@ -460,7 +460,7 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate
INTERNAL struct work_handle work_push_task_internal(work_task_func *func, void *data, b32 help, enum work_priority priority)
{
struct work_handle handle;
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
struct work_task *task = task_alloc_assume_locked();
task->data = data;
@ -473,7 +473,7 @@ INTERNAL struct work_handle work_push_task_internal(work_task_func *func, void *
};
handle = work_push_from_slate_assume_locked(&ws, help, priority);
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
return handle;
}
@ -505,11 +505,11 @@ void work_slate_push_task(struct work_slate *ws, work_task_func *func, void *dat
__prof;
struct work_task *task = NULL;
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
task = task_alloc_assume_locked();
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
task->data = data;
task->func = func;
@ -530,11 +530,11 @@ struct work_handle work_slate_end(struct work_slate *ws, enum work_priority prio
__prof;
struct work_handle handle;
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
handle = work_push_from_slate_assume_locked(ws, false, priority);
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
return handle;
}
@ -543,9 +543,9 @@ struct work_handle work_slate_end_and_help(struct work_slate *ws, enum work_prio
{
__prof;
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
struct work_handle handle = work_push_from_slate_assume_locked(ws, true, priority);
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
return handle;
}
@ -556,7 +556,7 @@ struct work_handle work_slate_end_and_help(struct work_slate *ws, enum work_prio
INTERNAL struct work *work_from_handle_assume_locked(struct work_handle handle)
{
sys_mutex_assert_locked(&L.mutex);
sys_mutex_assert_locked(&G.mutex);
struct work *work = handle.work;
if (work->gen != handle.gen) {
@ -570,7 +570,7 @@ INTERNAL struct work *work_from_handle_assume_locked(struct work_handle handle)
void work_wait(struct work_handle handle)
{
__prof;
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
{
struct work *work = work_from_handle_assume_locked(handle);
if (work) {
@ -581,24 +581,24 @@ void work_wait(struct work_handle handle)
work = work_from_handle_assume_locked(handle); /* Re-checking work is sitll valid here in case work_do caused work to release */
if (work) {
while (work->status != WORK_STATUS_DONE) {
sys_condition_variable_wait(&work->condition_variable_finished, &L.mutex);
sys_condition_variable_wait(&work->condition_variable_finished, &G.mutex);
}
}
}
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}
/* Try to pick up any scheduled tasks */
void work_help(struct work_handle handle)
{
__prof;
sys_mutex_lock(&L.mutex);
sys_mutex_lock(&G.mutex);
struct work *work = work_from_handle_assume_locked(handle);
if (work) {
work_exec_remaining_tasks_maybe_release_assume_locked(work);
}
sys_mutex_unlock(&L.mutex);
sys_mutex_unlock(&G.mutex);
}