More unified TLS model; testing JSON parsing & Unicode display.

This commit is contained in:
jacob 2024-04-05 18:02:17 -05:00
parent 2a2e857e08
commit 773a221b44
22 changed files with 500 additions and 283 deletions

25
res/test.json Normal file
View File

@ -0,0 +1,25 @@
{
"glossary": {
"title": "example glossary",
"GlossDiv": {
"title": "S",
"GlossList": {
"GlossEntry": {
"ID": "SGML",
"SortAs": "SGML",
"GlossTerm": "Standard Generalized Markup Language",
"Acronym": "SGML",
"Abbrev": "ISO 8879:1986",
"GlossDef": {
"para": "A meta-markup language, used to create markup languages such as DocBook.",
"GlossSeeAlso": [
"GML",
"XML"
]
},
"GlossSee": "markup"
}
}
}
}
}

View File

@ -8,7 +8,6 @@
#include "game.h"
#include "playback.h"
#include "log.h"
#include "console.h"
#include "resource.h"
#include "asset_cache.h"
#include "font.h"
@ -22,9 +21,6 @@
#include "draw.h"
#include "math.h"
#define WRITE_DIR "power_play"
#define SETTINGS_FILENAME "settings.json"
#if RTC
# if DEVELOPER
# define WINDOW_TITLE "Debug (Developer Build)"
@ -107,10 +103,48 @@ INTERNAL struct sys_window_settings default_window_settings(struct sys_window *w
* Entry point
* ========================== */
/* TODO: remove this (testing) */
#include "json.h"
void app_entry_point(void)
{
L.quit_sf = sync_flag_alloc();
{
struct temp_arena scratch = scratch_begin_no_conflict();
struct resource resource = resource_open(STR("res/test.json"));
struct json_parse_result res = json_from_string(scratch.arena, STRING_FROM_BUFFER(resource.bytes));
resource_close(resource);
struct json *root = res.root;
struct json_error_list errors = res.errors;
(UNUSED)root;
(UNUSED)errors;
if (errors.first) {
struct string err = string_format(scratch.arena,
STR("Error from offset %F to %F:\n%F"),
FMT_UINT(errors.first->start),
FMT_UINT(errors.first->end),
FMT_STR(errors.first->msg));
sys_panic(err);
}
scratch_end(scratch);
}
u32 worker_count = 4;
{
/* FIXME: Switch this on to utilize all cores. Only decreasing worker count for testing purposes. */
@ -131,8 +165,6 @@ void app_entry_point(void)
L.arena = arena_alloc(GIGABYTE(64));
L.write_path = initialize_write_directory(&L.arena, STR(WRITE_DIR));
/* Startup base systems */
{
/* Startup logging */
{
struct temp_arena scratch = scratch_begin_no_conflict();
@ -143,11 +175,6 @@ void app_entry_point(void)
logf_info("Start of logs");
}
/* Startup console */
struct console_startup_receipt console_sr = console_startup();
(UNUSED)console_sr;
}
/* Create window */
struct sys_window window = sys_window_alloc();

View File

@ -21,7 +21,7 @@ struct arena arena_alloc(u64 reserve)
arena.base = sys_memory_reserve(reserve);
if (!arena.base) {
/* Hard fail on memory reserve failure for now */
sys_panic_raw("Arena initialization error: Failed to reserve arena memory");
sys_panic_raw("Failed to reserve memory");
}
arena.reserved = reserve;
@ -31,7 +31,7 @@ struct arena arena_alloc(u64 reserve)
ASAN_POISON(arena.base, ARENA_BLOCK_SIZE);
if (!arena.base) {
/* Hard fail on commit failure */
sys_panic_raw("Arena initialization error: Failed to commit initial arena memory");
sys_panic_raw("Failed to commit initial memory block: System may be out of memory");
}
arena.committed = ARENA_BLOCK_SIZE;
@ -68,12 +68,12 @@ void *_arena_push_bytes(struct arena *arena, u64 size, u64 align)
u64 new_capacity = arena->committed + commit_bytes;
if (new_capacity > arena->reserved) {
/* Hard fail if we overflow reserved memory for now */
sys_panic_raw("Failed to commit new arena memory (Overflow of reserved memory)");
sys_panic_raw("Failed to commit new memory block: Overflow of reserved memory");
}
void *commit_address = arena->base + arena->committed;
if (!sys_memory_commit(commit_address, commit_bytes)) {
/* Hard fail on memory allocation failure for now */
sys_panic_raw("Failed to commit new arena memory (System out of memory?)");
sys_panic_raw("Failed to commit new memory block: System may be out of memory");
}
__proffree(arena->base);
__profalloc(arena->base, arena->committed + commit_bytes);

View File

@ -3,18 +3,18 @@
#include "memory.h"
#define arena_push(a, type) ((type *)_arena_push_bytes((a), sizeof(type), ALIGNOF(type)))
#define arena_push_zero(a, type) ((type *)_arena_push_bytes_zero((a), sizeof(type), ALIGNOF(type)))
#define arena_push(a, type) ((type *)_arena_push_bytes((a), sizeof(type), alignof(type)))
#define arena_push_zero(a, type) ((type *)_arena_push_bytes_zero((a), sizeof(type), alignof(type)))
#define arena_push_array(a, type, n) ((type *)_arena_push_bytes((a), (sizeof(type) * (n)), ALIGNOF(type)))
#define arena_push_array_zero(a, type, n) ((type *)_arena_push_bytes_zero((a), (sizeof(type) * (n)), ALIGNOF(type)))
#define arena_push_array(a, type, n) ((type *)_arena_push_bytes((a), (sizeof(type) * (n)), alignof(type)))
#define arena_push_array_zero(a, type, n) ((type *)_arena_push_bytes_zero((a), (sizeof(type) * (n)), alignof(type)))
#define arena_pop(a, type, dest) _arena_pop_struct((a), sizeof(type), dest)
#define arena_pop_array(a, type, n, dest) _arena_pop_struct((a), sizeof(type) * (n), dest)
/* Returns a pointer to where the next allocation would be (at alignment of type).
* Equivalent arena_push but without actually allocating anything. */
#define arena_dry_push(a, type) (type *)(_arena_dry_push((a), ALIGNOF(type)))
#define arena_dry_push(a, type) (type *)(_arena_dry_push((a), alignof(type)))
#define arena_align(a, align) (void *)(_arena_align((a), align))

View File

@ -3,14 +3,14 @@
#if OS_WINDOWS
FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return _InterlockedExchangeAdd((volatile long *)&x->_v, 0); }
FORCE_INLINE i32 atomic_i32_eval(struct atomic_i32 *x) { return _InterlockedOr((volatile long *)&x->_v, 0); }
FORCE_INLINE i32 atomic_i32_inc_eval(struct atomic_i32 *x) { return _InterlockedIncrement((volatile long *)&x->_v); }
FORCE_INLINE i32 atomic_i32_dec_eval(struct atomic_i32 *x) { return _InterlockedDecrement((volatile long *)&x->_v); }
FORCE_INLINE i32 atomic_i32_eval_add(struct atomic_i32 *x, i32 a) { return _InterlockedExchangeAdd((volatile long *)&x->_v, a); }
FORCE_INLINE i32 atomic_i32_eval_exchange(struct atomic_i32 *x, i32 e) { return _InterlockedExchange((volatile long *)&x->_v, e); }
FORCE_INLINE i32 atomic_i32_eval_compare_exchange(struct atomic_i32 *x, i32 c, i32 e) { return _InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
FORCE_INLINE i64 atomic_i64_eval(struct atomic_i64 *x) { return _InterlockedExchangeAdd64(&x->_v, 0); }
FORCE_INLINE i64 atomic_i64_eval(struct atomic_i64 *x) { return _InterlockedOr64(&x->_v, 0); }
FORCE_INLINE i64 atomic_i64_inc_eval(struct atomic_i64 *x) { return _InterlockedIncrement64(&x->_v); }
FORCE_INLINE i64 atomic_i64_dec_eval(struct atomic_i64 *x) { return _InterlockedDecrement64(&x->_v); }
FORCE_INLINE i64 atomic_i64_eval_add(struct atomic_i64 *x, i64 a) { return _InterlockedExchangeAdd64(&x->_v, a); }
@ -24,14 +24,14 @@ FORCE_INLINE u32 atomic_u32_eval_add(struct atomic_u32 *x, u32 a) { return _Inte
FORCE_INLINE u32 atomic_u32_eval_exchange(struct atomic_u32 *x, u32 e) { return _InterlockedExchange((volatile long *)&x->_v, e); }
FORCE_INLINE u32 atomic_u32_eval_compare_exchange(struct atomic_u32 *x, u32 c, u32 e) { return _InterlockedCompareExchange((volatile long *)&x->_v, e, c); }
FORCE_INLINE u64 atomic_u64_eval(struct atomic_u64 *x) { return _InterlockedExchangeAdd64((volatile i64 *)&x->_v, 0); }
FORCE_INLINE u64 atomic_u64_eval(struct atomic_u64 *x) { return _InterlockedOr64((volatile i64 *)&x->_v, 0); }
FORCE_INLINE u64 atomic_u64_inc_eval(struct atomic_u64 *x) { return _InterlockedIncrement64((volatile i64 *)&x->_v); }
FORCE_INLINE u64 atomic_u64_dec_eval(struct atomic_u64 *x) { return _InterlockedDecrement64((volatile i64 *)&x->_v); }
FORCE_INLINE u64 atomic_u64_eval_add(struct atomic_u64 *x, u64 a) { return _InterlockedExchangeAdd64((volatile i64 *)&x->_v, a); }
FORCE_INLINE u64 atomic_u64_eval_exchange(struct atomic_u64 *x, u64 e) { return _InterlockedExchange64((volatile i64 *)&x->_v, e); }
FORCE_INLINE u64 atomic_u64_eval_compare_exchange(struct atomic_u64 *x, u64 c, u64 e) { return _InterlockedCompareExchange64((volatile i64 *)&x->_v, e, c); }
FORCE_INLINE void *atomic_ptr_eval(struct atomic_ptr *x) { return (void *)_InterlockedExchangeAdd64((volatile i64 *)&x->_v, 0); }
FORCE_INLINE void *atomic_ptr_eval(struct atomic_ptr *x) { return (void *)_InterlockedOr64((volatile i64 *)&x->_v, 0); }
FORCE_INLINE void *atomic_ptr_eval_compare_exchange(struct atomic_ptr *x, void *c, void *e) { return (void *)_InterlockedCompareExchange64((volatile i64 *)&x->_v, (i64)e, (i64)c); }
#else

View File

@ -133,7 +133,6 @@ extern "C" {
#else
#define ASSERT(cond) (void)(0)
#define DEBUGBREAK
#endif
@ -180,11 +179,14 @@ void __asan_unpoison_memory_region(void *, size_t);
#define GIGABYTE(n) (n*MEGABYTE(1024ULL))
#define TERABYTE(n) (n*GIGABYTE(1024ULL))
/* Sizeof & Alignof */
#ifdef __cplusplus
# define ALIGNOF(type) alignof(type)
#else
# define ALIGNOF(type) __alignof__(type)
/* typeof */
#if defined(__cplusplus) || (__STDC_VERSION__ < 202311L)
# define typeof(type) __typeof__(type)
#endif
/* alignof */
#if !defined(__cplusplus) && (__STDC_VERSION__ < 202311L)
# define alignof(type) __alignof__(type)
#endif
#define ARRAY_COUNT(a) (sizeof(a) / sizeof((a)[0]))

View File

@ -1,5 +1,8 @@
/* Project-wide configurable constants */
#define WRITE_DIR "power_play"
#define SETTINGS_FILENAME "settings.json"
/* If we are not compiling in developer mode, assume resources are embedded as
* a tar archive in the executable. Otherwise, look for resources in the file
* system. */

View File

@ -1,32 +0,0 @@
#include "console.h"
#include "log.h"
#include "sys.h"
//GLOBAL struct {
//i32 i;
//} L = { 0 } DEBUG_LVAR(L_console);
/* ========================== *
* Log callback
* ========================== */
/* Receives every log event dispatched through the log system.
 * Currently a stub: the event is discarded (console rendering not implemented yet). */
INTERNAL void console_log_callback(struct log_event event)
{
(UNUSED)event;
}
/* Starts the console subsystem.
 * Hooks the console into the log stream so it observes all future log events.
 * Returns an (empty) receipt proving startup ran. */
struct console_startup_receipt console_startup(void)
{
log_register_callback(&console_log_callback);
return (struct console_startup_receipt) { 0 };
}
/* ========================== *
* Event callback
* ========================== */
/* Offers a system event to the console.
 * Returns true if the console consumed the event (callers should then skip
 * further processing of it). Currently a stub that consumes nothing. */
b32 console_process_event(struct sys_event event)
{
(UNUSED)event;
return false;
}

View File

@ -1,10 +0,0 @@
#ifndef CONSOLE_H
#define CONSOLE_H
#include "sys.h"
/* Receipt proving console_startup() ran; carries no data yet. */
struct console_startup_receipt { i32 _; };
/* Starts the console and registers its log callback. */
struct console_startup_receipt console_startup(void);
/* Feeds a system event to the console; returns true if the event was consumed. */
b32 console_process_event(struct sys_event event);
#endif

View File

@ -46,7 +46,7 @@ INTERNAL struct game_cmd_array pop_cmds(struct arena *arena)
sys_mutex_lock(&L.game_cmds_mutex);
{
struct buffer game_cmds_buff = arena_to_buffer(&L.game_cmds_arena);
arena_align(arena, ALIGNOF(struct game_cmd));
arena_align(arena, alignof(struct game_cmd));
array.cmds = (struct game_cmd *)arena_push_array(arena, u8, game_cmds_buff.size);
array.count = game_cmds_buff.size / sizeof(struct game_cmd);
MEMCPY(array.cmds, game_cmds_buff.data, game_cmds_buff.size);

View File

@ -4,9 +4,9 @@
#include "scratch.h"
#include "math.h"
/* Non-standard-conforming JSON parser.
* - Unicode escape sequences in strings (\u) not supported
* - Leading 0s in numbers are allowed
/* TODO:
* - Support unicode escape sequences in strings (\u)
* - Don't allow leading 0s in numbers
*/
/* ========================== *
@ -21,9 +21,12 @@
case 0x20: /* Space */ \
case 0x09 /* Horizontal tab */
#define CASE_DIGIT \
#define CASE_DIGIT_0_TO_9 \
case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9'
#define CASE_DIGIT_1_TO_9 \
case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9'
#define CASE_SYMBOL \
case ',': case ':': case '[': case ']': case '{': case '}'
@ -157,13 +160,60 @@ INTERNAL struct token_list lex(struct arena *arena, struct string src)
++pos;
} break;
#if 0
case '-': {
b32 is_whole_sign = false;
if ((pos + 1) < src.len) {
switch (src.text[pos + 1]) {
case '0': {
if ((pos + 2) < src.len) {
}
} break;
CASE_DIGIT_1_TO_9: {
is_whole_sign = true;
} break;
default: break;
}
}
if (is_whole_sign) {
struct token *part = push_token(arena, &res);
part->type = TOKEN_TYPE_PART_WHOLE_SIGN_NEGATIVE;
part->start = pos;
part->end = pos + 1;
++pos;
} else {
++pos;
break;
}
};
CASE_DIGIT_0_TO_9: {
t->type = TOKEN_TYPE_NUMBER;
enum lex_number_state state = LEX_NUMBER_STATE_WHOLE;
b32 number_done = false;
while (!number_done && pos < src.len) {
switch (src.text[pos]) {
CASE_DIGIT
}
}
} break;
#else
/* Number */
case '-': {
/* Verify '-' precedes digit */
b32 next_is_digit = false;
if ((pos + 1) < src.len) {
switch (src.text[pos + 1]) {
CASE_DIGIT: {
CASE_DIGIT_0_TO_9: {
next_is_digit = true;
} break;
}
@ -173,13 +223,13 @@ INTERNAL struct token_list lex(struct arena *arena, struct string src)
break;
}
} FALLTHROUGH;
CASE_DIGIT: {
CASE_DIGIT_0_TO_9: {
t->type = TOKEN_TYPE_NUMBER;
enum lex_number_state state = LEX_NUMBER_STATE_WHOLE;
b32 number_done = false;
while (!number_done && pos < src.len) {
switch (src.text[pos]) {
CASE_DIGIT: {
CASE_DIGIT_0_TO_9: {
++pos;
} break;
@ -188,7 +238,7 @@ INTERNAL struct token_list lex(struct arena *arena, struct string src)
if (state == LEX_NUMBER_STATE_WHOLE && (pos + 1) < src.len) {
u8 c1 = src.text[pos + 1];
switch (c1) {
CASE_DIGIT: {
CASE_DIGIT_0_TO_9: {
/* Consume '.' */
++consume;
} break;
@ -210,7 +260,7 @@ INTERNAL struct token_list lex(struct arena *arena, struct string src)
if ((state == LEX_NUMBER_STATE_WHOLE || state == LEX_NUMBER_STATE_FRACTION) && (pos + 1) < src.len) {
u8 c1 = src.text[pos + 1];
switch (c1) {
CASE_DIGIT: {
CASE_DIGIT_0_TO_9: {
/* Consume 'E'/'e' */
++consume;
} break;
@ -220,7 +270,7 @@ INTERNAL struct token_list lex(struct arena *arena, struct string src)
if ((pos + 2) < src.len) {
u8 c2 = src.text[pos + 2];
switch (c2) {
CASE_DIGIT: {
CASE_DIGIT_0_TO_9: {
/* Consume 'E'/'e' & '+'/'-' */
consume += 2;
} break;
@ -247,6 +297,7 @@ INTERNAL struct token_list lex(struct arena *arena, struct string src)
}
}
} break;
#endif
/* String */
case '"': {
@ -390,7 +441,7 @@ INTERNAL f64 interpret_number(struct string src)
enum lex_number_state state = LEX_NUMBER_STATE_WHOLE;
while (pos < src.len) {
switch (src.text[pos]) {
CASE_DIGIT: {
CASE_DIGIT_0_TO_9: {
switch (state) {
case LEX_NUMBER_STATE_WHOLE: {
if (!whole_present) {

View File

@ -87,7 +87,7 @@ struct mp3_decode_result mp3_decode(struct arena *arena, struct buffer encoded,
* Read
* ========================== */
arena_align(arena, ALIGNOF(i16));
arena_align(arena, alignof(i16));
res.pcm.samples = (i16 *)arena_dry_push(arena, u8);
u64 sample_bytes_read = 0;

21
src/scratch.c Normal file
View File

@ -0,0 +1,21 @@
#include "scratch.h"
/* Per-thread allocator for the scratch context: reserves one arena for each
 * scratch slot. Invoked lazily on a thread's first thread_local_eval of
 * tl_scratch_ctx (vctx points at that thread's zero-initialized scratch_ctx). */
INTERNAL THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(test_scratch_context_alloc, vctx)
{
__prof;
struct scratch_ctx *ctx = vctx;
for (u32 i = 0; i < ARRAY_COUNT(ctx->arenas); ++i) {
ctx->arenas[i] = arena_alloc(SCRATCH_ARENA_RESERVE);
}
}
/* Per-thread destructor for the scratch context: releases every scratch arena
 * allocated by test_scratch_context_alloc. Runs when the owning thread's
 * thread-local store is torn down. */
INTERNAL THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(test_scratch_context_release, vctx)
{
__prof;
struct scratch_ctx *ctx = vctx;
for (u32 i = 0; i < ARRAY_COUNT(ctx->arenas); ++i) {
arena_release(&ctx->arenas[i]);
}
}
THREAD_LOCAL_VAR_DEF_EXTERN(tl_scratch_ctx, struct scratch_ctx, &test_scratch_context_alloc, &test_scratch_context_release);

View File

@ -9,10 +9,10 @@
#define SCRATCH_ARENA_RESERVE (GIGABYTE(64))
/* ========================== *
* Scratch TLS
* Thread local state
* ========================== */
struct scratch_tls {
struct scratch_ctx {
struct arena arenas[SCRATCH_ARENAS_PER_THREAD];
#if RTC
@ -22,24 +22,10 @@ struct scratch_tls {
#endif
};
INLINE TLS_ALLOC_FUNC_DEF(scratch_tls_alloc, vctx)
{
struct scratch_tls *ctx = (struct scratch_tls *)vctx;
for (u32 i = 0; i < ARRAY_COUNT(ctx->arenas); ++i) {
ctx->arenas[i] = arena_alloc(SCRATCH_ARENA_RESERVE);
}
}
INLINE TLS_RELEASE_FUNC_DEF(scratch_tls_release, vctx)
{
struct scratch_tls *ctx = (struct scratch_tls *)vctx;
for (u32 i = 0; i < ARRAY_COUNT(ctx->arenas); ++i) {
arena_release(&ctx->arenas[i]);
}
}
THREAD_LOCAL_VAR_DECL_EXTERN(tl_scratch_ctx, struct scratch_ctx);
/* ========================== *
* Begin
* Scratch begin
* ========================== */
/* Any arena parameters in the calling function's context should be passed into this
@ -59,7 +45,7 @@ INLINE struct temp_arena _scratch_begin(struct arena *potential_conflict)
/* Use `scratch_begin_no_conflict` if no conflicts are present */
ASSERT(potential_conflict != NULL);
struct scratch_tls *ctx = (struct scratch_tls *)tls_get(SCRATCH_TLS);
struct scratch_ctx *ctx = thread_local_eval(&tl_scratch_ctx);
struct arena *scratch = &ctx->arenas[0];
if (potential_conflict && scratch->base == potential_conflict->base) {
scratch = &ctx->arenas[1];
@ -92,7 +78,7 @@ INLINE struct temp_arena _scratch_begin(struct arena *potential_conflict)
INLINE struct temp_arena _scratch_begin_no_conflict(void)
{
struct scratch_tls *ctx = (struct scratch_tls *)tls_get(SCRATCH_TLS);
struct scratch_ctx *ctx = thread_local_eval(&tl_scratch_ctx);
struct arena *scratch = &ctx->arenas[0];
struct temp_arena temp = arena_temp_begin(scratch);
@ -108,13 +94,13 @@ INLINE struct temp_arena _scratch_begin_no_conflict(void)
}
/* ========================== *
* End
* Scratch end
* ========================== */
INLINE void scratch_end(struct temp_arena scratch_temp)
{
#if RTC
struct scratch_tls *ctx = (struct scratch_tls *)tls_get(SCRATCH_TLS);
struct scratch_ctx *ctx = thread_local_eval(&tl_scratch_ctx);
if (ctx->scratch_id_stack_count > 0) {
u64 scratch_id = scratch_temp.scratch_id;
u64 expected_id = ctx->scratch_id_stack[--ctx->scratch_id_stack_count];

View File

@ -1,7 +1,7 @@
#ifndef SYS_H
#define SYS_H
struct tls_table;
struct thread_local_store;
/* ========================== *
* Events
@ -384,7 +384,7 @@ void sys_semaphore_signal(struct sys_semaphore *semaphore, u32 count);
* Thread local storage
* ========================== */
struct tls_table *sys_thread_get_tls(void);
struct thread_local_store *sys_thread_get_thread_local_store(void);
/* ========================== *
* Threads
@ -475,6 +475,7 @@ void sys_panic(struct string msg);
* Sleep
* ========================== */
void sys_sleep(f64 seconds);
void sys_sleep_precise(f64 seconds);
void sys_sleep_imprecise(f64 seconds);
#endif

View File

@ -1455,7 +1455,7 @@ void sys_semaphore_signal(struct sys_semaphore *semaphore, u32 count)
struct win32_tls {
HANDLE sleep_timer;
struct tls_table app_tls;
struct thread_local_store store;
};
INTERNAL void win32_thread_set_tls(struct win32_tls *ctx)
@ -1474,20 +1474,20 @@ INTERNAL struct win32_tls win32_tls_alloc(void)
{
struct win32_tls tls = { 0 };
tls.sleep_timer = CreateWaitableTimerExW(NULL, NULL, CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, TIMER_ALL_ACCESS);
tls.app_tls = tls_table_alloc();
tls.store = thread_local_store_alloc();
return tls;
}
INTERNAL void win32_tls_release(struct win32_tls *tls)
{
tls_table_release(&tls->app_tls);
thread_local_store_release(&tls->store);
CloseHandle(tls->sleep_timer);
}
struct tls_table *sys_thread_get_tls(void)
struct thread_local_store *sys_thread_get_thread_local_store(void)
{
struct win32_tls *thread_ctx = (struct win32_tls *)win32_thread_get_tls();
return &thread_ctx->app_tls;
return &thread_ctx->store;
}
/* ========================== *
@ -1746,46 +1746,7 @@ void sys_panic(struct string msg)
/* https://blog.bearcats.nl/perfect-sleep-function/ */
INTERNAL void win32_classic_sleep(f64 seconds)
{
__prof;
i64 qpc_per_second = L.timer_frequency.QuadPart;
i32 scheduler_period_ms = L.scheduler_period_ms;
LARGE_INTEGER qpc;
QueryPerformanceCounter(&qpc);
i64 target_qpc = (i64)(qpc.QuadPart + seconds * qpc_per_second);
/* TODO: Calculate tolerance */
/* TODO: Maybe increase tolerance for higher precision but more power usage */
//const double tolerance = 1.02;
const double tolerance = 0.52 * scheduler_period_ms;
/* Sleep */
f64 sleep_ms = (seconds * 1000) - tolerance;
i32 sleep_slices = (i32)(sleep_ms / scheduler_period_ms);
if (sleep_slices > 0) {
__profscope(win32_sleep);
Sleep((DWORD)sleep_slices * scheduler_period_ms);
}
{
__profscope(win32_qpc);
QueryPerformanceCounter(&qpc);
}
/* Spin for any remaining time */
{
__profscope(sleep_spin);
while (qpc.QuadPart < target_qpc) {
YieldProcessor();
QueryPerformanceCounter(&qpc);
}
}
}
INTERNAL void win32_timer_sleep(f64 seconds, HANDLE timer)
INTERNAL void win32_precise_sleep_timer(f64 seconds, HANDLE timer)
{
__prof;
@ -1827,23 +1788,68 @@ INTERNAL void win32_timer_sleep(f64 seconds, HANDLE timer)
QueryPerformanceCounter(&qpc);
}
}
}
void sys_sleep(f64 seconds)
/* Precise sleep fallback for Windows versions without
 * CREATE_WAITABLE_TIMER_HIGH_RESOLUTION: sleep in whole scheduler slices for
 * most of the duration, then spin on QueryPerformanceCounter for the remainder.
 * Technique: https://blog.bearcats.nl/perfect-sleep-function/ */
INTERNAL void win32_precise_sleep_legacy(f64 seconds)
{
__prof;
i64 qpc_per_second = L.timer_frequency.QuadPart;
i32 scheduler_period_ms = L.scheduler_period_ms;
LARGE_INTEGER qpc;
QueryPerformanceCounter(&qpc);
/* Absolute QPC tick at which the sleep should end */
i64 target_qpc = (i64)(qpc.QuadPart + seconds * qpc_per_second);
/* TODO: Calculate tolerance */
/* TODO: Maybe increase tolerance for higher precision but more power usage */
//const double tolerance = 1.02;
/* Under-sleep by about half a scheduler period so Sleep() never overshoots */
const double tolerance = 0.52 * scheduler_period_ms;
/* Sleep */
f64 sleep_ms = (seconds * 1000) - tolerance;
i32 sleep_slices = (i32)(sleep_ms / scheduler_period_ms);
if (sleep_slices > 0) {
__profscope(win32_sleep);
Sleep((DWORD)sleep_slices * scheduler_period_ms);
}
{
__profscope(win32_qpc);
QueryPerformanceCounter(&qpc);
}
/* Spin for any remaining time */
{
__profscope(sleep_spin);
while (qpc.QuadPart < target_qpc) {
YieldProcessor();
QueryPerformanceCounter(&qpc);
}
}
}
/* High-accuracy sleep: prefers the per-thread high-resolution waitable timer,
 * falling back to the legacy Sleep+spin method when the timer could not be
 * created (CREATE_WAITABLE_TIMER_HIGH_RESOLUTION needs a recent Windows). */
void sys_sleep_precise(f64 seconds)
{
__prof;
HANDLE timer = win32_thread_get_tls()->sleep_timer;
if (!timer) {
/* Older Windows: no high-resolution waitable timer available */
win32_precise_sleep_legacy(seconds);
return;
}
/* Preferred path: high-resolution waitable timer */
win32_precise_sleep_timer(seconds, timer);
}
/* Coarse sleep: rounds the requested duration to whole milliseconds and hands
 * it to the OS scheduler via Sleep(). Low power, no spinning; accuracy is
 * limited to roughly the scheduler period. */
void sys_sleep_imprecise(f64 seconds)
{
__prof;
/* Fix: convert seconds to milliseconds BEFORE rounding. The previous code
 * rounded the raw seconds value, so any request shorter than ~0.5s rounded
 * to 0 and became Sleep(0) (a bare yield) instead of an actual sleep. */
u32 ms = math_round((f32)(seconds * 1000.0));
Sleep(ms);
}
/* ========================== *
* Entry point
* ========================== */
@ -1863,10 +1869,11 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
const wchar_t *error_msg = NULL;
SetThreadDescription(GetCurrentThread(), L"Main thread");
/* ========================== *
* Win32 setup
* ========================== */
/* Initialize COM */
CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
SetThreadDescription(GetCurrentThread(), L"Main thread");
/* Query system info */
GetSystemInfo(&L.info);
@ -1881,22 +1888,6 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
/* Set up timing period */
timeBeginPeriod(L.scheduler_period_ms);
/* Initialize lookup tables */
win32_init_vk_btn_table();
/* Set up TLS */
L.thread_tls_index = TlsAlloc();
if (L.thread_tls_index == TLS_OUT_OF_INDEXES) {
/* TODO: GetLastError */
error_msg = L"Platform initialization error: TLS_OUT_OF_INDEXES";
goto abort;
}
/* Initialize main thread context.
* This must happen before scratch memory can be used. */
struct win32_tls main_thread_tls = win32_tls_alloc();
win32_thread_set_tls(&main_thread_tls);
/* Set up condition variables */
L.condition_variables_mutex = sys_mutex_alloc();
L.condition_variables_arena = arena_alloc(GIGABYTE(64));
@ -1909,6 +1900,17 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
L.windows_mutex = sys_mutex_alloc();
L.windows_arena = arena_alloc(GIGABYTE(64));
/* Set up TLS index */
L.thread_tls_index = TlsAlloc();
if (L.thread_tls_index == TLS_OUT_OF_INDEXES) {
/* TODO: GetLastError */
error_msg = L"Platform initialization error: TLS_OUT_OF_INDEXES";
goto abort;
}
/* Initialize vk table */
win32_init_vk_btn_table();
/* Create window class */
{
/* Register the window class */
@ -1948,6 +1950,14 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
}
}
/* ========================== *
* Main thread setup
* ========================== */
/* Initialize main thread context. */
struct win32_tls main_thread_tls = win32_tls_alloc();
win32_thread_set_tls(&main_thread_tls);
/* Create app thread & wait for return */
struct sys_thread app_thread = sys_thread_init(&app_thread_entry_point, NULL, STR("[P9] App thread"));
sys_thread_join(&app_thread);
@ -1962,9 +1972,6 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
win32_tls_release(&main_thread_tls);
/* Uninitialize COM */
CoUninitialize();
return 0;
}

114
src/tls.c
View File

@ -1,63 +1,95 @@
#include "tls.h"
#include "sys.h"
#include "arena.h"
#include "atomic.h"
#include "intrinsics.h"
#include "scratch.h"
#include "work.h"
#define THREAD_LOCAL_TABLE_RESERVE (MEGABYTE(1))
#define TLS_TABLE_RESERVE (MEGABYTE(1))
GLOBAL struct {
struct atomic_i64 tls_metas_lock_flag;
u64 tls_metas_count;
struct thread_local_var_meta tls_metas[MAX_THREAD_LOCAL_VARS];
} L = { 0 }, DEBUG_LVAR(L_tls);
struct tls_info {
u64 size;
u64 align;
tls_alloc_func *alloc;
tls_release_func *release;
};
#define X(tls_name, tls_struct, tls_alloc_func_type, tls_release_func_type) { .size = sizeof(tls_struct), .align = ALIGNOF(tls_struct), .alloc = tls_alloc_func_type, .release = tls_release_func_type },
GLOBAL READONLY struct tls_info g_tls_info_table[TLS_IDENTIFIER_COUNT] = {
TLS_TABLE(X)
};
#undef X
struct tls_table tls_table_alloc(void)
/* Acquires the global spinlock guarding L.tls_metas / L.tls_metas_count.
 * The compare-exchange wrapper returns the flag's PREVIOUS value: 0 means the
 * 0->1 transition succeeded and we now own the lock; non-zero means another
 * thread holds it and we must retry.
 * Fix: the loop condition was inverted (`== 0`), which spun one extra time
 * after a successful acquire and, worse, fell straight through while another
 * thread held the lock (return value 1), breaking mutual exclusion. */
INTERNAL void tls_metas_lock(void)
{
while (atomic_i64_eval_compare_exchange(&L.tls_metas_lock_flag, 0, 1) != 0) {
/* Spinlock */
ix_pause();
}
}
/* Releases the tls_metas spinlock by publishing 0 back to the flag
 * (atomic exchange provides the required release ordering on x86/MSVC). */
INTERNAL void tls_metas_unlock(void)
{
atomic_i64_eval_exchange(&L.tls_metas_lock_flag, 0);
}
/* Creates a thread's thread-local store: a single arena that backs all of the
 * thread's lazily-allocated thread-local variables. Called once per thread at
 * thread startup; paired with thread_local_store_release. */
struct thread_local_store thread_local_store_alloc(void)
{
__prof;
struct thread_local_store t = { 0 };
t.arena = arena_alloc(THREAD_LOCAL_TABLE_RESERVE);
return t;
}
void tls_table_release(struct tls_table *t)
void thread_local_store_release(struct thread_local_store *t)
{
__prof;
/* Release allocated tls data in reverse order */
for (u64 index_plus_one = t->allocation_order_count; index_plus_one > 0; --index_plus_one) {
enum tls_identifier identifier = t->allocation_order[index_plus_one - 1];
void *data = t->lookup[identifier];
struct tls_info *info = &g_tls_info_table[identifier];
if (info->release) {
info->release(data);
tls_metas_lock();
{
for (u64 i = t->allocation_order_count; i-- > 0;) {
u64 id = t->allocation_order[i];
void *data = t->lookup[id];
struct thread_local_var_meta *meta = &L.tls_metas[id];
if (meta->release) {
meta->release(data);
}
}
}
tls_metas_unlock();
arena_release(&t->arena);
}
void *tls_get(enum tls_identifier identifier)
void *_thread_local_eval(struct thread_local_var_meta *meta)
{
struct tls_table *t = sys_thread_get_tls();
void **data_slot = &t->lookup[identifier];
if (!*data_slot) {
/* Allocate */
struct tls_info *info = &g_tls_info_table[identifier];
arena_align(&t->arena, info->align);
*data_slot = arena_push_array(&t->arena, u8, info->size);
if (info->alloc) {
info->alloc(*data_slot);
} else {
MEMZERO(*data_slot, info->size);
/* Register var if unregistered */
u64 id;
{
u64 id_plus_one = atomic_u64_eval(&meta->id_plus_one);
if (id_plus_one == 0) {
__profscope(_thread_local_eval__REGISTER);
tls_metas_lock();
{
id = L.tls_metas_count++;
if (id >= MAX_THREAD_LOCAL_VARS) {
sys_panic_raw("Maximum number of thread local variables reached");
}
t->allocation_order[t->allocation_order_count] = identifier;
++t->allocation_order_count;
atomic_u64_eval_exchange(&meta->id_plus_one, id + 1);
L.tls_metas[id] = *meta;
}
tls_metas_unlock();
} else {
id = id_plus_one - 1;
}
}
/* Allocate var for thread if unallocated */
struct thread_local_store *t = sys_thread_get_thread_local_store();
void **data_slot = &t->lookup[id];
if (!*data_slot) {
__profscope(_thread_local_eval__ALLOC);
/* Allocate */
arena_align(&t->arena, meta->align);
*data_slot = arena_push_array(&t->arena, u8, meta->size);
if (meta->alloc) {
meta->alloc(*data_slot);
} else {
MEMZERO(*data_slot, meta->size);
}
t->allocation_order[t->allocation_order_count++] = id;
}
return *data_slot;

View File

@ -1,42 +1,62 @@
#ifndef TLS_H
#define TLS_H
/* ========================================================================== */
/* TLS table (X macro) */
/* ========================================================================== */
#define MAX_THREAD_LOCAL_VARS 1024
#define TLS_TABLE(X) \
X(SCRATCH_TLS, struct scratch_tls, &scratch_tls_alloc, &scratch_tls_release) \
X(WORKER_TLS, struct worker_tls, NULL, NULL )
/* ========================== *
* Thread local store
* ========================== */
/* ========================================================================== */
/* ========================================================================== */
/* ========================================================================== */
#include "sys.h"
#define TLS_ALLOC_FUNC_DEF(name, arg_name) void name(void *arg_name)
typedef TLS_ALLOC_FUNC_DEF(tls_alloc_func, tls_struct);
#define TLS_RELEASE_FUNC_DEF(name, arg_name) void name(void *arg_name)
typedef TLS_RELEASE_FUNC_DEF(tls_release_func, tls_struct);
#define X(identifier, tls_struct, tls_alloc_func_type, tls_release_func_type) identifier,
enum tls_identifier {
TLS_TABLE(X)
TLS_IDENTIFIER_COUNT
};
#undef X
struct tls_table {
struct thread_local_store {
void *lookup[MAX_THREAD_LOCAL_VARS];
struct arena arena;
void *lookup[TLS_IDENTIFIER_COUNT];
enum tls_identifier allocation_order[TLS_IDENTIFIER_COUNT];
u64 allocation_order_count;
u64 allocation_order[MAX_THREAD_LOCAL_VARS];
};
struct tls_table tls_table_alloc(void);
void tls_table_release(struct tls_table *t);
void *tls_get(enum tls_identifier identifier);
struct thread_local_store thread_local_store_alloc(void);
void thread_local_store_release(struct thread_local_store *t);
/* ========================== *
* Thread local var
* ========================== */
#define THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(name, arg_name) void name(void *arg_name)
typedef THREAD_LOCAL_VAR_ALLOC_FUNC_DEF(thread_local_var_alloc_func, tls_struct);
#define THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(name, arg_name) void name(void *arg_name)
typedef THREAD_LOCAL_VAR_RELEASE_FUNC_DEF(thread_local_var_release_func, tls_struct);
struct thread_local_var_meta {
struct atomic_u64 id_plus_one;
u64 size;
u64 align;
thread_local_var_alloc_func *alloc;
thread_local_var_release_func *release;
};
#define THREAD_LOCAL_VAR_DEF(var_name, type, alloc_func, release_func) \
struct { struct thread_local_var_meta meta; type *_t; } var_name = { \
.meta = { \
.size = sizeof(type), \
.align = alignof(type), \
.alloc = (alloc_func), \
.release = (release_func) \
} \
}
#define THREAD_LOCAL_VAR_DECL_EXTERN(var_name, type) struct __thread_local_struct##var_name { struct thread_local_var_meta meta; type *_t; }; extern struct __thread_local_struct##var_name var_name;
#define THREAD_LOCAL_VAR_DEF_EXTERN(var_name, type, alloc_func, release_func) \
typeof(var_name) var_name = { \
.meta = { \
.size = sizeof(type), \
.align = alignof(type), \
.alloc = (alloc_func), \
.release = (release_func) \
} \
}
/* Evaluates (allocating on first use) the calling thread's instance of a
 * thread-local var declared with THREAD_LOCAL_VAR_DEF*, returning a correctly
 * typed pointer.
 * Fix: removed the stray trailing ';' from the macro body — a function-like
 * macro must not end in a semicolon, or every expansion splices in an empty
 * statement and any use inside an expression (e.g. an if-condition or a
 * function argument) fails to compile. */
#define thread_local_eval(var_ptr) ((typeof((var_ptr)->_t))_thread_local_eval(&(var_ptr)->meta))
void *_thread_local_eval(struct thread_local_var_meta *meta);
#endif

View File

@ -10,12 +10,31 @@
#include "string.h"
#include "scratch.h"
#include "math.h"
#include "console.h"
#include "sys.h"
#include "world.h"
#include "entity.h"
#include "mixer.h"
/* FIXME: remove this (testing) */
#include "utf.h"
GLOBAL u8 test_input_array[256] = { 0 };
GLOBAL u32 test_input_array_pos;
struct bind_state {
b32 is_held; /* Is this bind held down this frame */
u32 num_presses; /* How many times was this bind pressed since last frame */
@ -101,7 +120,7 @@ INTERNAL struct sys_event_array pop_sys_events(struct arena *arena)
sys_mutex_lock(&L.sys_events_mutex);
{
struct buffer events_buff = arena_to_buffer(&L.sys_events_arena);
arena_align(arena, ALIGNOF(struct sys_event));
arena_align(arena, alignof(struct sys_event));
array.events = (struct sys_event *)arena_push_array(arena, u8, events_buff.size);
array.count = events_buff.size / sizeof(struct sys_event);
MEMCPY(array.events, events_buff.data, events_buff.size);
@ -449,11 +468,6 @@ INTERNAL void user_update(void)
for (u64 entity_index = 0; entity_index < events.count; ++entity_index) {
struct sys_event *event = &events.events[entity_index];
/* Send event to console. Skip if consumed. */
if (console_process_event(*event)) {
continue;
}
if (event->kind == SYS_EVENT_KIND_QUIT) {
app_quit();
}
@ -494,6 +508,28 @@ INTERNAL void user_update(void)
}
}
}
/* FIXME: Remove this (testing) */
/* Test unicode input */
if (event->kind == SYS_EVENT_KIND_TEXT) {
u32 codepoint = event->text_codepoint;
struct utf8_encode_result encoded = utf8_encode(codepoint);
MEMCPY(&test_input_array[test_input_array_pos], encoded.chars8, encoded.count8);
test_input_array_pos += encoded.count8;
/* Print */
if (test_input_array_pos > ARRAY_COUNT(test_input_array) - 4) {
struct string str = { .len = test_input_array_pos, .text = test_input_array };
sys_message_box(SYS_MESSAGE_BOX_KIND_OK, str);
test_input_array_pos = 0;
}
}
if (event->kind == SYS_EVENT_KIND_BUTTON_DOWN && event->button == SYS_BTN_BACKSPACE) {
if (test_input_array_pos > 0) {
--test_input_array_pos;
}
}
}
/* ========================== *
@ -978,6 +1014,54 @@ INTERNAL void user_update(void)
arena_temp_end(temp);
}
/* TODO: remove this */
/* Draw test input string */
{
struct font *font = font_load(STR("res/fonts/fixedsys.ttf"), 12.0f);
//struct font *font = font_load(STR("res/fonts/Inconsolata-Regular.ttf"), 12.0f);
#if 0
struct rect atlas_rect = {
.pos = V2(0, 0),
.size = font->texture.size
};
struct draw_texture_params dparams = DRAW_TEXTURE_PARAMS(.texture = &font->texture);
draw_texture_rect(L.viewport_canvas, dparams, atlas_rect);
#endif
struct string src_str = (struct string) { .len = test_input_array_pos, .text = test_input_array };
struct string draw_str = string_copy(scratch.arena, src_str);
{
u64 codepoint_count = 0;
u64 pos = 0;
while (pos < src_str.len) {
struct string remaining = { .len = src_str.len - pos, .text = src_str.text + pos };
struct utf8_decode_result decoded = utf8_decode(remaining);
if (codepoint_count % 10 == 0) {
draw_str.len += string_copy(scratch.arena, STR("\n")).len;
}
draw_str.len += string_copy(scratch.arena, STR("[")).len;
draw_str.len += string_from_uint(scratch.arena, decoded.codepoint, 16).len;
draw_str.len += string_copy(scratch.arena, STR("]: '")).len;
struct string character = { .len = decoded.advance8, .text = remaining.text };
draw_str.len += string_copy(scratch.arena, character).len;
draw_str.len += string_copy(scratch.arena, STR("' ")).len;
pos += decoded.advance8;
++codepoint_count;
}
}
//struct v2 pos = v2_round(V2(0, L.viewport_size.y / 2));
struct v2 pos = v2_round(V2(0, 0));
draw_text(L.viewport_canvas, font, pos, draw_str);
}
/* Push game cmds */
pubilsh_game_cmds(&cmd_list);

View File

@ -137,7 +137,6 @@ struct sync_flag {
INLINE struct sync_flag sync_flag_alloc(void)
{
__prof;
struct sync_flag sf = { 0 };
sf.mutex = sys_mutex_alloc();
sf.cv = sys_condition_variable_alloc();
@ -146,7 +145,6 @@ INLINE struct sync_flag sync_flag_alloc(void)
/* Counterpart to sync_flag_alloc: frees the flag's mutex and condition
 * variable. Callers must ensure no thread is still waiting on the flag —
 * TODO confirm against sync_flag_wait. */
INLINE void sync_flag_release(struct sync_flag *sf)
{
__prof; /* profiling scope marker (project macro) */
sys_mutex_release(&sf->mutex);
sys_condition_variable_release(&sf->cv);
}
@ -155,13 +153,8 @@ INLINE void sync_flag_set(struct sync_flag *sf)
{
__prof;
if (atomic_i32_eval_compare_exchange(&sf->flag, 0, 1) == 0) {
sys_mutex_lock(&sf->mutex);
{
atomic_i32_eval_exchange(&sf->flag, 1);
sys_condition_variable_signal(&sf->cv);
}
sys_mutex_unlock(&sf->mutex);
}
}
INLINE void sync_flag_wait(struct sync_flag *sf)
@ -187,7 +180,7 @@ INLINE void sleep_frame(sys_timestamp_t last_frame_time, f64 target_dt)
f64 last_frame_dt = sys_timestamp_seconds(sys_timestamp() - last_frame_time);
f64 sleep_time = target_dt - last_frame_dt;
if (sleep_time > 0) {
sys_sleep(sleep_time);
sys_sleep_precise(sleep_time);
}
}
}

View File

@ -6,6 +6,7 @@
#include "memory.h"
#include "string.h"
#include "log.h"
#include "tls.h"
/* Terminology:
*
@ -87,12 +88,22 @@ GLOBAL struct {
struct work *scheduled_work_priority_tails[NUM_WORK_PRIORITIES];
} L = { 0 }, DEBUG_LVAR(L_work);
INTERNAL void worker_thread_entry_point(void *thread_data);
/* ========================== *
* Thread local state
* ========================== */
/* Per-thread context for the work system, stored in a dynamic thread-local
 * slot (replaces the old tls_get(WORKER_TLS) worker_tls). */
struct worker_ctx {
b32 is_worker; /* set true in worker_thread_entry_point; zero (false) on non-pool threads */
};
/* No alloc/release hooks (NULL, NULL) — presumably the slot is plain
 * zero-initialized storage; confirm against _thread_local_eval. */
GLOBAL THREAD_LOCAL_VAR_DEF(tl_worker_ctx, struct worker_ctx, NULL, NULL);
/* ========================== *
* Startup
* ========================== */
INTERNAL void worker_thread_entry_point(void *thread_data);
struct work_startup_receipt work_startup(u32 num_worker_threads)
{
struct temp_arena scratch = scratch_begin_no_conflict();
@ -368,8 +379,8 @@ INTERNAL void worker_thread_entry_point(void *thread_data)
{
(UNUSED)thread_data;
struct worker_tls *ctx = tls_get(WORKER_TLS);
*ctx = (struct worker_tls) {
struct worker_ctx *ctx = thread_local_eval(&tl_worker_ctx);
*ctx = (struct worker_ctx) {
.is_worker = true
};
@ -434,7 +445,7 @@ INTERNAL struct work_handle work_push_from_slate_assume_locked(struct work_slate
* does not occur. However it is not ideal since it creates situations in
* which work is not done asynchronously.
*/
struct worker_tls *ctx = tls_get(WORKER_TLS);
struct worker_ctx *ctx = thread_local_eval(&tl_worker_ctx);
if (ctx->is_worker) {
b32 more_tasks = true;
while (L.idle_worker_count == 0 && work->workers == 0 && more_tasks) {

View File

@ -31,10 +31,6 @@ struct work_slate {
u32 num_tasks;
};
struct worker_tls {
b32 is_worker;
};
struct work_startup_receipt { i32 _; };
struct work_startup_receipt work_startup(u32 num_worker_threads);
void work_shutdown(void);