power_play/src/sys_win32.c
2025-07-14 13:50:19 -05:00

3451 lines
114 KiB
C

#include "sys.h"
#include "memory.h"
#include "arena.h"
#include "app.h"
#include "string.h"
#include "atomic.h"
#include "log.h"
#include "math.h"
#include "util.h"
#include "uni.h"
#include "resource.h"
#pragma warning(push, 0)
# define UNICODE
# include <Windows.h>
# include <windowsx.h>
# include <ShlObj_core.h>
# include <fileapi.h>
# include <dwmapi.h>
# include <bcrypt.h>
# include <avrt.h>
# include <TlHelp32.h>
#pragma warning(pop)
#pragma comment(lib, "kernel32")
#pragma comment(lib, "user32")
#pragma comment(lib, "shell32")
#pragma comment(lib, "ole32")
#pragma comment(lib, "winmm")
#pragma comment(lib, "dwmapi")
#pragma comment(lib, "bcrypt")
#pragma comment(lib, "synchronization")
#pragma comment(lib, "avrt")
#define SYS_WINDOW_EVENT_LISTENERS_MAX 512
#define WINDOW_CLASS_NAME L"power_play_window_class"
#define THREAD_STACK_SIZE KIBI(64)
#define FIBER_STACK_SIZE MEBI(4)
/* Assume scheduler cycle is 20hz at start to be conservative */
#define DEFAULT_SCHEDULER_CYCLE_PERIOD_NS 50000000
#define NUM_ROLLING_SCHEDULER_PERIODS 1000
#define FIBER_NAME_PREFIX_CSTR "Fiber ["
#define FIBER_NAME_SUFFIX_CSTR "]"
#define FIBER_NAME_MAX_SIZE 64
#define NUM_WAIT_ADDR_BINS 16384
#define NUM_WAIT_TIME_BINS 1024
#define MAX_EXIT_FUNCS 1024
/* Arbitrary threshold for determining when to fall back from a looped WakeByAddressSingle to WakeByAddressAll */
#define WAKE_ALL_THRESHOLD 24
/* FIFO spinlock: threads take a ticket and spin until `serving` reaches it.
 * Fields are cache-line padded (atomic64_padded) to avoid false sharing. */
struct ticket_mutex {
	struct atomic64_padded ticket;  /* Next ticket to hand out */
	struct atomic64_padded serving; /* Ticket currently allowed to hold the lock */
};
/* Bookkeeping for a thread created through the sys layer; nodes live in
 * G.threads_arena and are linked into the global thread list (G.threads_*). */
struct win32_thread {
	sys_thread_func *entry_point;    /* User entry function run by the thread */
	void *thread_data;               /* Opaque argument passed to entry_point */
	char thread_name_cstr[256];      /* Debug name (UTF-8) */
	wchar_t thread_name_wstr[256];   /* Debug name (UTF-16, for win32 APIs) */
	i32 profiler_group;
	struct win32_thread *next;       /* Doubly-linked global thread list */
	struct win32_thread *prev;
	HANDLE handle;                   /* Win32 thread handle */
};
/* Pending cursor operations accumulated in win32_window.cursor_set_flags and
 * applied by the window thread. HIDE/SHOW and ENABLE/DISABLE_CLIP are
 * opposing pairs; POSITION pairs with win32_window.cursor_set_position. */
enum win32_window_cursor_set_flag {
	WIN32_WINDOW_CURSOR_SET_FLAG_NONE = 0x0,
	WIN32_WINDOW_CURSOR_SET_FLAG_POSITION = 0x1,
	WIN32_WINDOW_CURSOR_SET_FLAG_HIDE = 0x2,
	WIN32_WINDOW_CURSOR_SET_FLAG_SHOW = 0x4,
	/* FIX: normalized `0X8`/`0X10` to lowercase `0x` to match the other constants */
	WIN32_WINDOW_CURSOR_SET_FLAG_ENABLE_CLIP = 0x8,
	WIN32_WINDOW_CURSOR_SET_FLAG_DISABLE_CLIP = 0x10
};
/* Per-window state. Each window runs on its own thread (window_thread);
 * other threads communicate through the mutex-guarded settings / cursor
 * fields and the double-buffered event arenas. Nodes are pooled via
 * G.windows_arena and recycled through `next_free`. */
struct win32_window {
	u32 flags;
	HWND hwnd;
	u32 tid;                                /* Owning (window) thread id */
	struct snc_counter ready_fence;         /* Signaled once the window is created and usable */
	u16 utf16_high_surrogate_last_input;    /* Pending high surrogate for WM_CHAR pairing */
	struct snc_mutex settings_mutex;        /* Guards `settings` */
	struct sys_window_settings settings;
	i32 monitor_width;
	i32 monitor_height;
	/* NOTE: width & height are unaffected by window minimization (they retain
	 * their pre-minimized values) */
	i32 x, y, width, height;
	/* FIXME: Use a cmd buffer for updating cursor (and maybe also settings).
	 * Current setup means cursor_set calls can be applied out of order */
	u32 cursor_set_flags;                   /* enum win32_window_cursor_set_flag bits */
	struct v2 cursor_set_position;
	struct rect cursor_clip_bounds;
	struct snc_mutex event_arena_swp_mutex; /* Guards the event arena double-buffer swap */
	i32 current_event_arena_index;          /* Index (0/1) of the arena currently written to */
	struct arena *event_arenas[2];
	struct sys_thread *window_thread;
	struct atomic32 shutdown;               /* Set to request the window thread to exit */
	struct win32_window *next_free;         /* Free-list link when pooled */
};
/* One bucket of waiters that share the same wait value (an address or a
 * scheduler-cycle deadline). Waiters are fibers, linked by id through the
 * fiber's next/prev waiter fields; wait lists themselves chain inside a
 * wait_bin. Allocated from G.wait_lists_arena and recycled per bin. */
struct alignas(64) wait_list {
	u64 value;                      /* The address (as u64) or wake time everyone here waits on */
	i16 first_waiter;               /* Fiber id list head (0 = none) */
	i16 last_waiter;
	i32 num_waiters;
	struct wait_list *next_in_bin;  /* Chain of wait lists hashed to the same bin */
	struct wait_list *prev_in_bin;
};
STATIC_ASSERT(alignof(struct wait_list) == 64); /* Avoid false sharing */
/* Hash bucket for wait lists (value % bin count selects the bin).
 * `lock` guards all lists in the bin, including the free list. */
struct alignas(64) wait_bin {
	struct wait_list *first_wait_list;
	struct wait_list *last_wait_list;
	struct wait_list *first_free_wait_list; /* Recycled wait_list nodes */
	struct ticket_mutex lock;
};
STATIC_ASSERT(alignof(struct wait_bin) == 64); /* Avoid false sharing */
/* Reason a job fiber switched back to its worker fiber:
 * NONE = just started (set before running the job), DONE = job returned,
 * WAIT = job called sys_wait and must be parked on the wait tables. */
enum yield_kind {
	YIELD_KIND_NONE,
	YIELD_KIND_DONE,
	YIELD_KIND_WAIT,
	NUM_YIELD_KINDS
};
/* Message passed from a yielding job fiber to its worker fiber. Lives on the
 * worker's stack (see job_worker_entry) and is pointed to by fiber->yield_param. */
struct yield_param {
	enum yield_kind kind;
	union {
		/* Valid when kind == YIELD_KIND_WAIT: mirrors the sys_wait arguments */
		struct {
			volatile void *addr;  /* Address to wait on (0 = pure timed sleep) */
			void *cmp;            /* Expected value; wait only while *addr == *cmp */
			u32 size;             /* Width of the compare: 1, 2, 4 or 8 bytes */
			i64 timeout_ns;       /* I64_MAX = infinite, 0 = no timeout component */
		} wait;
	};
};
/* One fiber slot in the global G.fibers table, hand-packed into exactly two
 * cache lines (the layout comments track byte budget per line). Fiber id 0 is
 * reserved as a null id (see fiber_from_id). While a fiber sits on a pool's
 * free list, `parent_id` is reused as the free-list link (see fiber_release). */
struct alignas(64) fiber {
	/* ==================================================== */
	void *addr; /* 08 bytes */ /* Win32 fiber handle (CreateFiber/ConvertThreadToFiber) */
	/* ==================================================== */
	char *name_cstr; /* 08 bytes */ /* Debug name "Fiber [<id>]" */
	/* ==================================================== */
	struct atomic32 wake_lock; /* 04 bytes (4 byte alignment) */ /* Claimed by wakers to dequeue this fiber exactly once */
	i16 id; /* 02 bytes */
	i16 parent_id; /* 02 bytes */ /* Worker fiber to switch back to on yield; free-list link when pooled */
	/* ==================================================== */
	u64 wait_addr; /* 08 bytes */ /* Address waited on (0 = none) */
	/* ==================================================== */
	u64 wait_time; /* 08 bytes */ /* Scheduler cycle to wake at (0 = none) */
	/* ==================================================== */
	i16 next_addr_waiter; /* 02 bytes */ /* Links within the wait_list waiter chains (fiber ids) */
	i16 prev_addr_waiter; /* 02 bytes */
	i16 next_time_waiter; /* 02 bytes */
	i16 prev_time_waiter; /* 02 bytes */
	/* ==================================================== */
	u8 _pad1[8]; /* 08 bytes (padding) */
	/* ==================================================== */
	u8 _pad2[8]; /* 08 bytes (padding) */
	/* ==================================================== */
	/* ==================== Cache line ==================== */
	/* ==================================================== */
	struct sys_scratch_ctx scratch_ctx; /* 16 bytes */
	/* ==================================================== */
	sys_job_func *job_func; /* 08 bytes */ /* Current job's function */
	/* ==================================================== */
	void *job_sig; /* 08 bytes */ /* Current job's signature/user data */
	/* ==================================================== */
	i32 job_id; /* 04 bytes */ /* Dispatch index within the job's count */
	i16 job_pool; /* 02 bytes */ /* enum sys_pool the job runs in */
	i16 job_priority; /* 02 bytes */
	/* ==================================================== */
	struct snc_counter *job_counter; /* 08 bytes */ /* Decremented when the job completes */
	/* ==================================================== */
	struct yield_param *yield_param; /* 08 bytes */ /* Points into the owning worker's stack */
	/* ==================================================== */
	u8 _pad3[8]; /* 08 bytes (padding) */
};
STATIC_ASSERT(sizeof(struct fiber) == 128); /* Padding validation (increase if necessary) */
STATIC_ASSERT(alignof(struct fiber) == 64); /* Avoid false sharing */
STATIC_ASSERT(offsetof(struct fiber, wake_lock) % 4 == 0); /* Atomic must be aligned */
STATIC_ASSERT(SYS_MAX_FIBERS < I16_MAX); /* Max fibers should fit in fiber id */
/* Per-worker-thread argument: which pool the worker serves and its index
 * within that pool. Cache-line aligned to avoid false sharing across workers. */
struct alignas(64) worker_ctx {
	enum sys_pool pool_kind;
	i32 id;
};
/* A queued job (or a yielded fiber awaiting resume, when fiber_id > 0).
 * For fresh jobs, workers claim dispatch indices by bumping num_dispatched
 * until it reaches `count`. Nodes are recycled via the queue's free list. */
struct job_info {
	i32 num_dispatched; /* Next dispatch index to hand out (== job_id of resumed fiber when fiber_id > 0) */
	i32 count;          /* Total dispatches for this job */
	sys_job_func *func;
	void *sig;
	struct snc_counter *counter; /* Optional; decremented once per completed dispatch */
	i16 fiber_id; /* If the job is being resumed from a yield */
	struct job_info *next;
};
/* Single-priority FIFO of job_info nodes, guarded by `lock`.
 * Nodes are allocated from `arena` and recycled through `first_free`. */
struct alignas(64) job_queue {
	struct ticket_mutex lock;
	struct arena *arena;
	struct job_info *first;
	struct job_info *last;
	struct job_info *first_free;
};
/* One worker pool: per-priority job queues, a free list of recycled fibers,
 * and the worker threads with their wake/shutdown synchronization. */
struct alignas(64) job_pool {
	/* Jobs */
	struct job_queue job_queues[NUM_SYS_PRIORITIES];
	struct ticket_mutex free_fibers_lock;    /* Guards the fiber free list below */
	i16 first_free_fiber_id;                 /* Head of free fiber chain (linked via fiber.parent_id) */
	/* Workers */
	struct atomic32_padded workers_shutdown; /* Set to ask all workers to exit */
	struct atomic64_padded num_jobs_in_queue;/* Used with the cv to decide when workers sleep */
	struct snc_mutex workers_wake_mutex;
	struct snc_cv workers_wake_cv;
	i32 num_worker_threads;
	i32 thread_priority;                     /* 0 = leave at default */
	u64 thread_affinity_mask;                /* 0 = leave at default */
	b32 thread_is_audio;                     /* Register workers with MMCSS "Pro Audio" */
	struct arena *worker_threads_arena;
	struct sys_thread **worker_threads;
	struct worker_ctx *worker_contexts;
};
/* ========================== *
* Global state
* ========================== */
/* All win32 platform-layer state lives in this single zero-initialized global.
 * DEBUG_ALIAS exposes it under a unique name for the debugger. */
GLOBAL struct {
	SYSTEM_INFO info;
	i64 timer_start_qpc;            /* QPC value at startup; time is measured relative to this */
	i64 ns_per_qpc;
	u32 main_thread_id;
	struct atomic32 shutdown;       /* Process-wide shutdown request flag */
	wchar_t cmdline_args_wstr[8192];
	/* Application control flow */
	struct atomic32 panicking;      /* Test-and-set: only the first panic proceeds */
	wchar_t panic_wstr[4096];       /* Pre-reserved buffer so panicking needs no allocation */
	HANDLE panic_event;
	HANDLE startup_end_event;
	HANDLE exit_begin_event;
	HANDLE exit_end_event;
	/* Key lookup table */
	enum sys_btn vk_btn_table[256]; /* Win32 virtual-key code -> sys button */
	/* Threads pool */
	struct snc_mutex threads_mutex;
	struct arena *threads_arena;
	struct win32_thread *threads_first;
	struct win32_thread *threads_last;
	struct win32_thread *threads_first_free;
	/* Watches pool */
	struct snc_mutex watches_mutex;
	struct arena *watches_arena;
	struct win32_watch *watches_first_free;
	/* Windows pool */
	WNDCLASSEXW window_class;
	struct snc_mutex windows_mutex;
	struct arena *windows_arena;
	struct win32_window *first_free_window;
	/* Exit funcs */
	struct atomic32 num_exit_funcs;
	sys_exit_func *exit_funcs[MAX_EXIT_FUNCS];
	/* Scheduler */
	struct atomic64_padded current_scheduler_cycle;           /* Monotonic cycle counter; wait deadlines are in these units */
	struct atomic64_padded current_scheduler_cycle_period_ns; /* Rolling mean of the scheduler period */
	/* Fibers */
	struct ticket_mutex fibers_lock;
	i16 num_fibers;
	struct arena *fiber_names_arena;
	struct fiber fibers[SYS_MAX_FIBERS];
	/* Wait lists */
	struct atomic64_padded waiter_wake_gen;
	struct ticket_mutex wait_lists_arena_lock;
	struct arena *wait_lists_arena;
	/* Wait tables */
	struct wait_bin wait_addr_bins[NUM_WAIT_ADDR_BINS];
	struct wait_bin wait_time_bins[NUM_WAIT_TIME_BINS];
	/* Job pools */
	struct job_pool job_pools[NUM_SYS_POOLS];
} G = ZI, DEBUG_ALIAS(G, G_sys_win32);
INTERNAL struct fiber *fiber_from_id(i16 id);
INTERNAL void job_fiber_yield(struct fiber *fiber, struct fiber *parent_fiber);
/* ========================== *
* Ticket mutex
* ========================== */
/* Acquires the ticket mutex in FIFO order: take a ticket, then spin until it
 * is being served. Fairness comes from the monotonically increasing ticket. */
INTERNAL void tm_lock(struct ticket_mutex *tm)
{
	i64 my_ticket = atomic64_fetch_add(&tm->ticket.v, 1);
	for (;;) {
		i64 now_serving = atomic64_fetch(&tm->serving.v);
		if (now_serving == my_ticket) {
			break;
		}
		ix_pause(); /* Be polite to the sibling hyperthread while spinning */
	}
}
/* Releases the ticket mutex: advancing `serving` admits the thread holding
 * the next ticket. Must only be called by the current lock holder. */
INTERNAL void tm_unlock(struct ticket_mutex *tm)
{
	atomic64_fetch_add(&tm->serving.v, 1);
}
/* ========================== *
* Exit
* ========================== */
/* Registers `func` to run at application exit. Thread-safe: slots are claimed
 * with an atomic increment. Panics when the table is full. */
void sys_on_exit(sys_exit_func *func)
{
	i32 index = atomic32_fetch_add(&G.num_exit_funcs, 1);
	if (index >= MAX_EXIT_FUNCS) {
		sys_panic(LIT("Maximum on exit functions registered"));
		/* FIX: sys_panic returns immediately if another thread is already
		 * panicking; previously we would fall through and write out of
		 * bounds into G.exit_funcs[index]. */
		return;
	}
	G.exit_funcs[index] = func;
}
/* Requests an orderly application exit by signaling the exit-begin event;
 * the main thread is responsible for the actual teardown. */
void sys_exit(void)
{
	SetEvent(G.exit_begin_event);
}
void sys_panic(struct string msg)
{
if (atomic32_fetch_test_set(&G.panicking, 0, 1) == 0) {
log_panic(msg);
wchar_t *wstr = G.panic_wstr;
u64 wstr_len = 0;
wchar_t prefix[] = L"A fatal error has occured and the application needs to exit:\n\n";
MEMCPY(wstr, prefix, min_u64(countof(G.panic_wstr), (countof(prefix) << 1)));
wstr_len += countof(prefix) - 1;
/* Perform manual string encode to avoid any implicit memory
* allocation (in case allocation is unreliable) */
struct string str8 = msg;
u64 pos8 = 0;
while (pos8 < str8.len) {
struct string str8_remaining = { .len = (str8.len - pos8), .text = str8.text + pos8 };
struct uni_decode_utf8_result decoded = uni_decode_utf8(str8_remaining);
struct uni_encode_utf16_result encoded = uni_encode_utf16(decoded.codepoint);
u64 wstr_new_len = wstr_len + encoded.count16;
if (wstr_new_len < (countof(G.panic_wstr) - 1)) {
u16 *dest = wstr + wstr_len;
MEMCPY(dest, encoded.chars16, (encoded.count16 << 1));
wstr_len = wstr_new_len;
pos8 += decoded.advance8;
} else {
break;
}
}
wstr[wstr_len] = 0;
#if RTC
MessageBoxExW(0, wstr, L"Fatal error", MB_ICONSTOP | MB_SETFOREGROUND | MB_TOPMOST, 0);
ASSERT(0);
#endif
SetEvent(G.panic_event);
/* Wait for process termination */
if (GetCurrentThreadId() != G.main_thread_id) {
Sleep(INFINITE);
}
}
}
/* ========================== *
* Scheduler
* ========================== */
/* Returns the scheduler's current (rolling-mean) cycle period in nanoseconds.
 * Used to convert nanosecond timeouts into scheduler-cycle deadlines. */
i64 sys_current_scheduler_period_ns(void)
{
	return atomic64_fetch(&G.current_scheduler_cycle_period_ns.v);
}
/* ========================== *
* Wait / wake
* ========================== */
/* Blocks until `*addr != *cmp` (width `size` bytes), the timeout elapses, or
 * a wake arrives. `addr == 0` is a pure sleep; `timeout_ns == I64_MAX` means
 * wait forever. On a job fiber (parent_id != 0) this yields to the worker,
 * which parks the fiber on the wait tables; on a plain thread it falls back
 * to Sleep/WaitOnAddress. */
void sys_wait(void *addr, void *cmp, u32 size, i64 timeout_ns)
{
	struct fiber *fiber = fiber_from_id(sys_current_fiber_id());
	i16 parent_id = fiber->parent_id;
	if (parent_id != 0) {
		/* Job fiber: hand the wait parameters to the worker via yield_param
		 * (which lives on the worker's stack) and switch back to it */
		*fiber->yield_param = (struct yield_param) {
			.kind = YIELD_KIND_WAIT,
			.wait = {
				.addr = addr,
				.cmp = cmp,
				.size = size,
				.timeout_ns = timeout_ns
			}
		};
		job_fiber_yield(fiber, fiber_from_id(parent_id));
	} else {
		/* Plain thread: convert ns -> ms for the win32 wait primitives */
		i32 timeout_ms = 0;
		if (timeout_ns == I64_MAX) {
			timeout_ms = INFINITE;
		} else if (timeout_ns != 0) {
			timeout_ms = timeout_ns / 1000000;
			/* Round a nonzero sub-millisecond timeout up to 1ms so it isn't lost */
			timeout_ms += (timeout_ms == 0) * math_fsign(timeout_ns);
		}
		if (addr == 0) {
			Sleep(timeout_ms);
		} else {
			WaitOnAddress(addr, cmp, size, timeout_ms);
		}
	}
}
/* REQUIRED: Caller must have acquired `wake_lock` for each fiber in array */
INTERNAL void wake_fibers_locked(i32 num_fibers, struct fiber **fibers)
{
/* Update wait lists */
for (i32 i = 0; i < num_fibers; ++i) {
struct fiber *fiber = fibers[i];
u64 wait_addr = fiber->wait_addr;
u64 wait_time = fiber->wait_time;
/* Lock & search wait bins */
/* TODO: Cache these in parameters since caller has one of them already calculated */
struct wait_bin *wait_addr_bin = 0;
struct wait_bin *wait_time_bin = 0;
struct wait_list *wait_addr_list = 0;
struct wait_list *wait_time_list = 0;
if (wait_addr != 0) {
wait_addr_bin = &G.wait_addr_bins[wait_addr % NUM_WAIT_ADDR_BINS];
tm_lock(&wait_addr_bin->lock);
for (struct wait_list *tmp = wait_addr_bin->first_wait_list; tmp && !wait_addr_list; tmp = tmp->next_in_bin) {
if (tmp->value == (u64)wait_addr) {
wait_addr_list = tmp;
}
}
}
if (wait_time != 0) {
wait_time_bin = &G.wait_time_bins[wait_time % NUM_WAIT_TIME_BINS];
tm_lock(&wait_time_bin->lock);
for (struct wait_list *tmp = wait_time_bin->first_wait_list; tmp && !wait_time_list; tmp = tmp->next_in_bin) {
if (tmp->value == (u64)wait_time) {
wait_time_list = tmp;
}
}
}
{
/* Remove from addr list */
if (wait_addr_list) {
if (--wait_addr_list->num_waiters == 0) {
/* Free addr list */
struct wait_list *prev = wait_addr_list->prev_in_bin;
struct wait_list *next = wait_addr_list->next_in_bin;
if (prev) {
prev->next_in_bin = next;
} else {
wait_addr_bin->first_wait_list = next;
}
if (next) {
next->prev_in_bin = prev;
} else {
wait_addr_bin->last_wait_list = prev;
}
wait_addr_list->next_in_bin = wait_addr_bin->first_free_wait_list;
wait_addr_bin->first_free_wait_list = wait_addr_list;
} else {
i16 prev_id = fiber->prev_addr_waiter;
i16 next_id = fiber->next_addr_waiter;
if (prev_id) {
fiber_from_id(prev_id)->next_addr_waiter = next_id;
} else {
wait_addr_list->first_waiter = next_id;
}
if (next_id) {
fiber_from_id(next_id)->prev_addr_waiter = prev_id;
} else {
wait_addr_list->last_waiter = prev_id;
}
}
fiber->wait_addr = 0;
fiber->prev_addr_waiter = 0;
fiber->next_addr_waiter = 0;
}
/* Remove from time list */
if (wait_time_list) {
if (--wait_time_list->num_waiters == 0) {
/* Free time list */
struct wait_list *prev = wait_time_list->prev_in_bin;
struct wait_list *next = wait_time_list->next_in_bin;
if (prev) {
prev->next_in_bin = next;
} else {
wait_time_bin->first_wait_list = next;
}
if (next) {
next->prev_in_bin = prev;
} else {
wait_time_bin->last_wait_list = prev;
}
wait_time_list->next_in_bin = wait_time_bin->first_free_wait_list;
wait_time_bin->first_free_wait_list = wait_time_list;
} else {
i16 prev_id = fiber->prev_time_waiter;
i16 next_id = fiber->next_time_waiter;
if (prev_id) {
fiber_from_id(prev_id)->next_time_waiter = next_id;
} else {
wait_time_list->first_waiter = next_id;
}
if (next_id) {
fiber_from_id(next_id)->prev_time_waiter = prev_id;
} else {
wait_time_list->last_waiter = prev_id;
}
}
fiber->wait_time = 0;
fiber->prev_time_waiter = 0;
fiber->next_time_waiter = 0;
}
/* Unlock fiber */
atomic32_fetch_set(&fiber->wake_lock, 0);
}
/* Unlock wait bins */
if (wait_time_bin != 0) tm_unlock(&wait_time_bin->lock);
if (wait_addr_bin != 0) tm_unlock(&wait_addr_bin->lock);
}
/* Resume jobs */
/* TODO: Batch submit waiters based on queue kind rather than one at a time */
i32 num_workers_to_wake[NUM_SYS_POOLS] = ZI;
for (i32 i = 0; i < num_fibers; ++i) {
struct fiber *fiber = fibers[i];
enum sys_pool pool_kind = fiber->job_pool;
++num_workers_to_wake[pool_kind];
struct job_pool *pool = &G.job_pools[pool_kind];
struct job_queue *queue = &pool->job_queues[fiber->job_priority];
tm_lock(&queue->lock);
{
struct job_info *info = 0;
if (queue->first_free) {
info = queue->first_free;
queue->first_free = info->next;
} else {
info = arena_push_no_zero(queue->arena, struct job_info);
}
MEMZERO_STRUCT(info);
info->count = 1;
info->num_dispatched = fiber->job_id;
info->func = fiber->job_func;
info->sig = fiber->job_sig;
info->counter = fiber->job_counter;
info->fiber_id = fiber->id;
if (queue->last) {
queue->last->next = info;
} else {
queue->first = info;
}
queue->last = info;
}
tm_unlock(&queue->lock);
}
/* Wake workers */
if (num_fibers > 0) {
for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(num_workers_to_wake); ++pool_kind) {
i32 wake_count = num_workers_to_wake[pool_kind];
if (wake_count > 0) {
struct job_pool *pool = &G.job_pools[pool_kind];
struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
{
atomic64_fetch_add(&pool->num_jobs_in_queue.v, wake_count);
snc_cv_signal(&pool->workers_wake_cv, wake_count);
}
snc_unlock(&lock);
}
}
}
}
/* Wakes up to `count` fibers waiting on `addr`, then wakes any plain threads
 * blocked in WaitOnAddress on the same address. A fiber is claimed by
 * test-and-setting its wake_lock, so a fiber is dequeued by at most one
 * waker; claimed fibers are handed to wake_fibers_locked. */
INTERNAL void wake_address(void *addr, i32 count)
{
	struct arena_temp scratch = scratch_begin_no_conflict();
	u64 wait_addr_bin_index = (u64)addr % NUM_WAIT_ADDR_BINS;
	struct wait_bin *wait_addr_bin = &G.wait_addr_bins[wait_addr_bin_index];
	struct wait_list *wait_addr_list = 0;
	/* Get list of waiting fibers */
	i32 num_fibers = 0;
	struct fiber **fibers = 0;
	{
		tm_lock(&wait_addr_bin->lock);
		{
			/* Search for wait addr list */
			for (struct wait_list *tmp = wait_addr_bin->first_wait_list; tmp && !wait_addr_list; tmp = tmp->next_in_bin) {
				if (tmp->value == (u64)addr) {
					wait_addr_list = tmp;
				}
			}
			/* Lock fibers & build array (at most `count`, at most num_waiters) */
			if (wait_addr_list) {
				fibers = arena_push_array_no_zero(scratch.arena, struct fiber *, wait_addr_list->num_waiters);
				for (struct fiber *fiber = fiber_from_id(wait_addr_list->first_waiter); fiber && num_fibers < count; fiber = fiber_from_id(fiber->next_addr_waiter)) {
					if (atomic32_fetch_test_set(&fiber->wake_lock, 0, 1) == 0) {
						fibers[num_fibers] = fiber;
						++num_fibers;
					}
				}
			}
		}
		tm_unlock(&wait_addr_bin->lock);
	}
	if (num_fibers > 0) {
		wake_fibers_locked(num_fibers, fibers);
	}
	/* Wake win32 blocking thread waiters */
	if (count >= WAKE_ALL_THRESHOLD) {
		WakeByAddressAll(addr);
	} else {
		for (i32 i = 0; i < count; ++i) {
			WakeByAddressSingle(addr);
		}
	}
	scratch_end(scratch);
}
/* Wakes every fiber whose wait deadline equals the given scheduler cycle
 * `time`. Same claim protocol as wake_address: fibers are claimed via
 * wake_lock test-and-set, then handed to wake_fibers_locked. */
INTERNAL void wake_time(u64 time)
{
	struct arena_temp scratch = scratch_begin_no_conflict();
	u64 wait_time_bin_index = (u64)time % NUM_WAIT_TIME_BINS;
	struct wait_bin *wait_time_bin = &G.wait_time_bins[wait_time_bin_index];
	struct wait_list *wait_time_list = 0;
	/* Build list of waiters to resume */
	i32 num_fibers = 0;
	struct fiber **fibers = 0;
	{
		tm_lock(&wait_time_bin->lock);
		{
			/* Search for wait time list */
			for (struct wait_list *tmp = wait_time_bin->first_wait_list; tmp && !wait_time_list; tmp = tmp->next_in_bin) {
				if (tmp->value == (u64)time) {
					wait_time_list = tmp;
				}
			}
			if (wait_time_list) {
				/* Set waiter wake status & build fibers list */
				fibers = arena_push_array_no_zero(scratch.arena, struct fiber *, wait_time_list->num_waiters);
				for (struct fiber *fiber = fiber_from_id(wait_time_list->first_waiter); fiber; fiber = fiber_from_id(fiber->next_time_waiter)) {
					if (atomic32_fetch_test_set(&fiber->wake_lock, 0, 1) == 0) {
						fibers[num_fibers] = fiber;
						++num_fibers;
					}
				}
			}
		}
		tm_unlock(&wait_time_bin->lock);
	}
	/* Safe with num_fibers == 0: wake_fibers_locked no-ops on an empty set */
	wake_fibers_locked(num_fibers, fibers);
	scratch_end(scratch);
}
/* Public wake API: wakes up to `count` waiters (fibers and threads) blocked
 * on `addr` via sys_wait. */
void sys_wake(void *addr, i32 count)
{
	wake_address(addr, count);
}
/* ========================== *
* Fibers
* ========================== */
INTERNAL void job_fiber_entry(void *id_ptr);
/* If `pool` is 0, then the currently running thread will be converted into a fiber */
INTERNAL struct fiber *fiber_alloc(struct job_pool *pool)
{
i16 fiber_id = 0;
struct fiber *fiber = 0;
char *new_name_cstr = 0;
{
if (pool != 0) {
tm_lock(&pool->free_fibers_lock);
if (pool->first_free_fiber_id) {
fiber_id = pool->first_free_fiber_id;
fiber = &G.fibers[fiber_id];
pool->first_free_fiber_id = fiber->parent_id;
}
tm_unlock(&pool->free_fibers_lock);
}
if (!fiber_id) {
tm_lock(&G.fibers_lock);
{
{
fiber_id = G.num_fibers++;
if (fiber_id >= SYS_MAX_FIBERS) {
sys_panic(LIT("Max fibers reached"));
}
fiber = &G.fibers[fiber_id];
new_name_cstr = arena_push_array(G.fiber_names_arena, char, FIBER_NAME_MAX_SIZE);
}
}
tm_unlock(&G.fibers_lock);
}
}
if (new_name_cstr != 0) {
fiber->id = fiber_id;
/* Id to ASCII */
i32 id_div = fiber_id;
char id_chars[64] = ZI;
i32 id_chars_len = 0;
do {
i32 digit = id_div % 10;
id_div /= 10;
id_chars[id_chars_len] = ("0123456789")[digit];
++id_chars_len;
} while (id_div > 0);
i32 rev_start = 0;
i32 rev_end = id_chars_len - 1;
while (rev_start < rev_end) {
char swp = id_chars[rev_start];
id_chars[rev_start] = id_chars[rev_end];
id_chars[rev_end] = swp;
++rev_start;
--rev_end;
}
/* Concat fiber name */
i32 name_size = 1;
ASSERT(sizeof(sizeof(FIBER_NAME_PREFIX_CSTR)) <= FIBER_NAME_MAX_SIZE);
MEMCPY(new_name_cstr, FIBER_NAME_PREFIX_CSTR, sizeof(FIBER_NAME_PREFIX_CSTR));
name_size += sizeof(FIBER_NAME_PREFIX_CSTR) - 2;
MEMCPY(new_name_cstr + name_size, id_chars, id_chars_len);
name_size += id_chars_len;
MEMCPY(new_name_cstr + name_size, FIBER_NAME_SUFFIX_CSTR, sizeof(FIBER_NAME_SUFFIX_CSTR));
name_size += sizeof(FIBER_NAME_SUFFIX_CSTR) - 2;
fiber->name_cstr = new_name_cstr;
/* Init win32 fiber */
if (pool != 0) {
fiber->addr = CreateFiber(FIBER_STACK_SIZE, job_fiber_entry, (void *)(i64)fiber_id);
} else {
/* Fiber is not a part of a job pool, convert thread to fiber */
fiber->addr = ConvertThreadToFiber((void *)(i64)fiber_id);
}
}
fiber->wait_addr = 0;
fiber->wait_time = 0;
fiber->prev_addr_waiter = 0;
fiber->next_addr_waiter = 0;
fiber->prev_time_waiter = 0;
fiber->next_time_waiter = 0;
fiber->job_func = 0;
fiber->job_sig = 0;
fiber->job_id = 0;
fiber->job_pool = 0;
fiber->job_priority = 0;
fiber->job_counter = 0;
fiber->yield_param = 0;
fiber->parent_id = 0;
return fiber;
}
/* Returns a fiber to its pool's free list. While on the free list the
 * fiber's parent_id field serves as the next-free link. */
INTERNAL void fiber_release(struct job_pool *pool, struct fiber *fiber)
{
	tm_lock(&pool->free_fibers_lock);
	fiber->parent_id = pool->first_free_fiber_id;
	pool->first_free_fiber_id = fiber->id;
	tm_unlock(&pool->free_fibers_lock);
}
/* Maps a fiber id to its slot in the global table. Id 0 (and any negative
 * id) is the null id and maps to a null pointer. */
FORCE_INLINE struct fiber *fiber_from_id(i16 id)
{
	return (id > 0) ? &G.fibers[id] : 0;
}
/* ========================== *
* Test job
* ========================== */
/* Returns the id of the fiber the calling thread is currently running.
 * The id is stashed as the win32 fiber's user data at creation time
 * (CreateFiber / ConvertThreadToFiber in fiber_alloc). */
i16 sys_current_fiber_id(void)
{
	return (i16)(i64)GetFiberData();
}
/* Dispatches `count` instances of `func` into a pool's priority queue and
 * wakes up to `count` workers. `counter`, if provided, is incremented by
 * `count` up front and decremented once per completed dispatch.
 * SYS_POOL_INHERIT runs the job in the calling fiber's pool. */
void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_pool pool_kind, enum sys_priority priority, struct snc_counter *counter)
{
	if (count > 0) {
		if (counter) {
			snc_counter_add(counter, count);
		}
		struct fiber *fiber = fiber_from_id(sys_current_fiber_id());
		priority = clamp_i32(priority, fiber->job_priority, NUM_SYS_PRIORITIES - 1); /* A job cannot create a job with a higher priority than itself */
		if (pool_kind == SYS_POOL_INHERIT) {
			pool_kind = fiber->job_pool;
		}
		struct job_pool *pool = &G.job_pools[pool_kind];
		struct job_queue *queue = &pool->job_queues[priority];
		tm_lock(&queue->lock);
		{
			/* Enqueue a single job_info covering all `count` dispatches;
			 * workers claim dispatch indices from it one at a time */
			struct job_info *info = 0;
			if (queue->first_free) {
				info = queue->first_free;
				queue->first_free = info->next;
			} else {
				info = arena_push_no_zero(queue->arena, struct job_info);
			}
			MEMZERO_STRUCT(info);
			info->count = count;
			info->func = func;
			info->sig = sig;
			info->counter = counter;
			if (queue->last) {
				queue->last->next = info;
			} else {
				queue->first = info;
			}
			queue->last = info;
		}
		tm_unlock(&queue->lock);
		/* Wake workers */
		struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
		{
			atomic64_fetch_add(&pool->num_jobs_in_queue.v, count);
			snc_cv_signal(&pool->workers_wake_cv, count);
		}
		snc_unlock(&lock);
	}
}
/* ========================== *
* Job fiber control
* ========================== */
/* Switches from a running job fiber back to its parent (worker) fiber.
 * Resumes here when the worker later switches this fiber back in.
 * The caller must have populated fiber->yield_param before yielding. */
INTERNAL void job_fiber_yield(struct fiber *fiber, struct fiber *parent_fiber)
{
	(UNUSED)fiber; /* Only used by ASSERT/profiler; silences unused warnings in stripped builds */
	ASSERT(fiber->id == sys_current_fiber_id());
	ASSERT(parent_fiber->id == fiber->parent_id);
	ASSERT(parent_fiber->id > 0);
	{
		__prof_fiber_leave();
		/* Barriers fence memory around the fiber switch in both directions */
		MemoryBarrier();
		SwitchToFiber(parent_fiber->addr);
		MemoryBarrier();
		__prof_fiber_enter(fiber->name_cstr, PROF_THREAD_GROUP_FIBERS - MEBI(fiber->job_pool) + KIBI(1) + fiber->id);
	}
}
/* Switches from the worker fiber into a job fiber; returns when that fiber
 * yields back (see job_fiber_yield). Barriers fence memory around the switch. */
INTERNAL void job_fiber_resume(struct fiber *fiber)
{
	MemoryBarrier();
	SwitchToFiber(fiber->addr);
	MemoryBarrier();
}
/* ========================== *
* Job fiber entry
* ========================== */
/* Entry point of every pooled job fiber. Loops forever: run the job the
 * worker installed on this fiber, mark the yield as DONE, and switch back to
 * the worker — which may later resume the same fiber with a new job.
 * `id_ptr` carries the fiber id (set at CreateFiber time). */
INTERNAL void job_fiber_entry(void *id_ptr)
{
	i16 id = (i32)(i64)id_ptr;
	struct fiber *fiber = fiber_from_id(id);
	__prof_fiber_enter(fiber->name_cstr, PROF_THREAD_GROUP_FIBERS - MEBI(fiber->job_pool) + KIBI(1) + fiber->id);
	for (;;) {
		/* Run job */
		{
			/* yield_param points into the current worker's stack; volatile
			 * because the worker reads/writes it across fiber switches */
			volatile struct yield_param *yield_param = fiber->yield_param;
			yield_param->kind = YIELD_KIND_NONE;
			struct sys_job_data data = ZI;
			data.id = fiber->job_id;
			data.sig = fiber->job_sig;
			{
				MemoryBarrier();
				fiber->job_func(data);
				MemoryBarrier();
			}
		}
		/* Yield */
		{
			/* Re-read yield_param: the job may itself have yielded and been
			 * resumed under a different worker, changing the pointer */
			volatile struct yield_param *yield_param = fiber->yield_param;
			yield_param->kind = YIELD_KIND_DONE;
			struct fiber *parent_fiber = fiber_from_id(fiber->parent_id);
			job_fiber_yield(fiber, parent_fiber);
		}
	}
}
/* ========================== *
* Job worker thread
* ========================== */
INTERNAL SYS_THREAD_DEF(job_worker_entry, worker_ctx_arg)
{
struct worker_ctx *ctx = worker_ctx_arg;
enum sys_pool pool_kind = ctx->pool_kind;
struct job_pool *pool = &G.job_pools[pool_kind];
(UNUSED)ctx;
{
/* TODO: Heuristic pinning */
/* TODO: Pin non-worker threads to other cores */
HANDLE thread_handle = GetCurrentThread();
if (pool->thread_priority) {
b32 success = SetThreadPriority(thread_handle, pool->thread_priority) != 0;
ASSERT(success);
(UNUSED)success;
}
if (pool->thread_affinity_mask) {
b32 success = SetThreadAffinityMask(thread_handle, pool->thread_affinity_mask) != 0;
ASSERT(success);
(UNUSED)success;
}
if (pool->thread_is_audio) {
/* https://learn.microsoft.com/en-us/windows/win32/procthread/multimedia-class-scheduler-service#registry-settings */
DWORD task = 0;
HANDLE mmc_handle = AvSetMmThreadCharacteristics(L"Pro Audio", &task);
ASSERT(mmc_handle);
(UNUSED)mmc_handle;
}
}
i32 worker_fiber_id = sys_current_fiber_id();
struct fiber *job_fiber = 0;
b32 shutdown = 0;
while (!shutdown) {
/* Pull job from queue */
enum sys_priority job_priority = 0;
i16 job_fiber_id = 0;
i32 job_id = 0;
sys_job_func *job_func = 0;
void *job_sig = 0;
struct snc_counter *job_counter = 0;
{
//__profnc("Pull job", RGB32_F(0.75, 0.75, 0));
for (enum sys_priority priority = 0; priority < (i32)countof(pool->job_queues) && !job_func; ++priority) {
struct job_queue *queue = &pool->job_queues[priority];
if (queue) {
tm_lock(&queue->lock);
{
struct job_info *info = queue->first;
while (info && !job_func) {
struct job_info *next = info->next;
b32 dequeue = 0;
if (info->fiber_id <= 0) {
job_id = info->num_dispatched++;
if (job_id < info->count) {
/* Pick job */
atomic64_fetch_add(&pool->num_jobs_in_queue.v, -1);
job_priority = priority;
job_func = info->func;
job_sig = info->sig;
job_counter = info->counter;
if (job_id == (info->count - 1)) {
/* We're picking up the last dispatch, so dequeue the job */
dequeue = 1;
}
}
} else {
/* This job is to be resumed from a yield */
atomic64_fetch_add(&pool->num_jobs_in_queue.v, -1);
job_fiber_id = info->fiber_id;
job_priority = priority;
job_id = info->num_dispatched;
job_func = info->func;
job_sig = info->sig;
job_counter = info->counter;
dequeue = 1;
}
if (dequeue) {
if (!next) {
queue->last = 0;
}
queue->first = next;
info->next = queue->first_free;
queue->first_free = info;
}
info = next;
}
}
tm_unlock(&queue->lock);
}
}
}
/* Use resumed fiber if present */
if (job_fiber_id > 0) {
if (job_fiber) {
fiber_release(pool, job_fiber);
}
job_fiber = fiber_from_id(job_fiber_id);
}
/* Run fiber */
if (job_func) {
if (!job_fiber) {
job_fiber = fiber_alloc(pool);
}
job_fiber_id = job_fiber->id;
{
__profnc("Run fiber", RGB32_F(1, 1, 1));
__profvalue(job_fiber->id);
struct yield_param yield = ZI;
job_fiber->parent_id = worker_fiber_id;
job_fiber->job_func = job_func;
job_fiber->job_sig = job_sig;
job_fiber->job_id = job_id;
job_fiber->job_pool = pool_kind;
job_fiber->job_priority = job_priority;
job_fiber->job_counter = job_counter;
job_fiber->yield_param = &yield;
b32 done = 0;
while (!done) {
job_fiber_resume(job_fiber);
switch (yield.kind) {
default:
{
/* Invalid yield kind */
struct arena_temp scratch = scratch_begin_no_conflict();
sys_panic(string_format(scratch.arena, LIT("Invalid fiber yield kind \"%F\""), FMT_SINT(yield.kind)));
scratch_end(scratch);
} break;
case YIELD_KIND_WAIT:
{
volatile void *wait_addr = yield.wait.addr;
void *wait_cmp = yield.wait.cmp;
u32 wait_size = yield.wait.size;
i64 wait_timeout_ns = yield.wait.timeout_ns;
i64 wait_time = 0;
if (wait_timeout_ns > 0 && wait_timeout_ns < I64_MAX) {
u64 current_scheduler_cycle = atomic64_fetch(&G.current_scheduler_cycle.v);
i64 current_scheduler_cycle_period_ns = atomic64_fetch(&G.current_scheduler_cycle_period_ns.v);
wait_time = current_scheduler_cycle + max_i64((i64)((f64)wait_timeout_ns / (f64)current_scheduler_cycle_period_ns), 1);
}
u64 wait_addr_bin_index = (u64)wait_addr % NUM_WAIT_ADDR_BINS;
u64 wait_time_bin_index = (u64)wait_time % NUM_WAIT_TIME_BINS;
struct wait_bin *wait_addr_bin = &G.wait_addr_bins[wait_addr_bin_index];
struct wait_bin *wait_time_bin = &G.wait_time_bins[wait_time_bin_index];
if (wait_addr != 0) tm_lock(&wait_addr_bin->lock);
{
if (wait_time != 0) tm_lock(&wait_time_bin->lock);
{
b32 cancel_wait = wait_addr == 0 && wait_time == 0;
if (wait_addr != 0) {
switch (wait_size) {
case 1: cancel_wait = (u8)_InterlockedCompareExchange8(wait_addr, 0, 0) != *(u8 *)wait_cmp; break;
case 2: cancel_wait = (u16)_InterlockedCompareExchange16(wait_addr, 0, 0) != *(u16 *)wait_cmp; break;
case 4: cancel_wait = (u32)_InterlockedCompareExchange(wait_addr, 0, 0) != *(u32 *)wait_cmp; break;
case 8: cancel_wait = (u64)_InterlockedCompareExchange64(wait_addr, 0, 0) != *(u64 *)wait_cmp; break;
default: cancel_wait = 1; ASSERT(0); break; /* Invalid wait size */
}
}
if (wait_time != 0 && !cancel_wait) {
cancel_wait = wait_time <= atomic64_fetch(&G.current_scheduler_cycle.v);
}
if (!cancel_wait) {
if (wait_addr != 0) {
/* Search for wait addr list in bin */
struct wait_list *wait_addr_list = 0;
for (struct wait_list *tmp = wait_addr_bin->first_wait_list; tmp && !wait_addr_list; tmp = tmp->next_in_bin) {
if (tmp->value == (u64)wait_addr) {
wait_addr_list = tmp;
}
}
/* Allocate new wait addr list */
if (!wait_addr_list) {
if (wait_addr_bin->first_free_wait_list) {
wait_addr_list = wait_addr_bin->first_free_wait_list;
wait_addr_bin->first_free_wait_list = wait_addr_list->next_in_bin;
} else {
tm_lock(&G.wait_lists_arena_lock);
{
wait_addr_list = arena_push_no_zero(G.wait_lists_arena, struct wait_list);
}
tm_unlock(&G.wait_lists_arena_lock);
}
MEMZERO_STRUCT(wait_addr_list);
wait_addr_list->value = wait_addr;
if (wait_addr_bin->last_wait_list) {
wait_addr_bin->last_wait_list->next_in_bin = wait_addr_list;
wait_addr_list->prev_in_bin = wait_addr_bin->last_wait_list;
} else {
wait_addr_bin->first_wait_list = wait_addr_list;
}
wait_addr_bin->last_wait_list = wait_addr_list;
}
/* Insert fiber into wait addr list */
job_fiber->wait_addr = wait_addr;
if (wait_addr_list->last_waiter) {
fiber_from_id(wait_addr_list->last_waiter)->next_addr_waiter = job_fiber_id;
job_fiber->prev_addr_waiter = wait_addr_list->last_waiter;
} else {
wait_addr_list->first_waiter = job_fiber_id;
}
wait_addr_list->last_waiter = job_fiber_id;
++wait_addr_list->num_waiters;
}
if (wait_time != 0) {
/* Search for wait time list in bin */
struct wait_list *wait_time_list = 0;
for (struct wait_list *tmp = wait_time_bin->first_wait_list; tmp && !wait_time_list; tmp = tmp->next_in_bin) {
if (tmp->value == (u64)wait_time) {
wait_time_list = tmp;
}
}
/* Allocate new wait time list */
if (!wait_time_list) {
if (wait_time_bin->first_free_wait_list) {
wait_time_list = wait_time_bin->first_free_wait_list;
wait_time_bin->first_free_wait_list = wait_time_list->next_in_bin;
} else {
tm_lock(&G.wait_lists_arena_lock);
{
wait_time_list = arena_push_no_zero(G.wait_lists_arena, struct wait_list);
}
tm_unlock(&G.wait_lists_arena_lock);
}
MEMZERO_STRUCT(wait_time_list);
wait_time_list->value = wait_time;
if (wait_time_bin->last_wait_list) {
wait_time_bin->last_wait_list->next_in_bin = wait_time_list;
wait_time_list->prev_in_bin = wait_time_bin->last_wait_list;
} else {
wait_time_bin->first_wait_list = wait_time_list;
}
wait_time_bin->last_wait_list = wait_time_list;
}
/* Insert fiber into wait time list */
job_fiber->wait_time = wait_time;
if (wait_time_list->last_waiter) {
fiber_from_id(wait_time_list->last_waiter)->next_time_waiter = job_fiber_id;
job_fiber->prev_time_waiter = wait_time_list->last_waiter;
} else {
wait_time_list->first_waiter = job_fiber_id;
}
wait_time_list->last_waiter = job_fiber_id;
++wait_time_list->num_waiters;
}
/* Pop worker's job fiber */
job_fiber = 0;
done = 1;
}
}
if (wait_time != 0) tm_unlock(&wait_time_bin->lock);
}
if (wait_addr != 0) tm_unlock(&wait_addr_bin->lock);
} break;
case YIELD_KIND_DONE:
{
if (job_counter) {
snc_counter_add(job_counter, -1);
}
done = 1;
} break;
}
}
}
}
/* Wait */
struct snc_lock wake_lock = snc_lock_s(&pool->workers_wake_mutex);
{
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
while (atomic64_fetch(&pool->num_jobs_in_queue.v) <= 0 && !shutdown) {
//__profnc("Wait for job", RGB32_F(0.75, 0.75, 0));
snc_cv_wait(&pool->workers_wake_cv, &wake_lock);
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
}
}
snc_unlock(&wake_lock);
}
/* Worker shutdown */
if (job_fiber) {
fiber_release(pool, job_fiber);
}
}
/* ========================== *
* Job scheduler thread
* ========================== */
INTERNAL SYS_THREAD_DEF(job_scheduler_entry, _)
{
(UNUSED)_;
{
i32 priority = THREAD_PRIORITY_TIME_CRITICAL;
b32 success = SetThreadPriority(GetCurrentThread(), priority);
(UNUSED)success;
ASSERT(success);
}
/* Create high resolution timer */
HANDLE timer = CreateWaitableTimerExW(0, 0, CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, TIMER_ALL_ACCESS);
if (!timer) {
sys_panic(LIT("Failed to create high resolution timer"));
}
/* Create rolling buffer of scheduler cycles initialized to default value */
i32 periods_index = 0;
i64 periods[NUM_ROLLING_SCHEDULER_PERIODS] = ZI;
for (i32 i = 0; i < (i32)countof(periods); ++i) {
periods[i] = DEFAULT_SCHEDULER_CYCLE_PERIOD_NS;
}
i64 last_cycle_ns = 0;
while (!atomic32_fetch(&G.shutdown)) {
__profn("Job scheduler cycle");
{
__profn("Job scheduler wait");
LARGE_INTEGER due = ZI;
due.QuadPart = -1;
//due.QuadPart = -10000;
//due.QuadPart = -32000;
//due.QuadPart = -12000;
//due.QuadPart = -8000;
SetWaitableTimerEx(timer, &due, 0, 0, 0, 0, 0);
WaitForSingleObject(timer, INFINITE);
}
/* Calculate mean period */
i64 now_ns = sys_time_ns();
i64 period_ns = last_cycle_ns == 0 ? DEFAULT_SCHEDULER_CYCLE_PERIOD_NS : now_ns - last_cycle_ns;
last_cycle_ns = now_ns;
/* Calculate mean period */
{
periods[periods_index++] = period_ns;
if (periods_index == countof(periods)) {
periods_index = 0;
}
f64 periods_sum_ns = 0;
for (i32 i = 0; i < (i32)countof(periods); ++i) {
periods_sum_ns += (f64)periods[i];
}
f64 mean_ns = periods_sum_ns / (f64)countof(periods);
atomic64_fetch_set(&G.current_scheduler_cycle_period_ns.v, math_round_to_int64(mean_ns));
}
{
__profn("Job scheduler run");
i64 current_cycle = atomic64_fetch_add(&G.current_scheduler_cycle.v, 1) + 1;
wake_time((u64)current_cycle);
}
}
}
/* ========================== *
* Test thread
* ========================== */
INTERNAL SYS_THREAD_DEF(test_entry, _)
{
    /* Bootstraps the job system: starts the scheduler thread, spins up every
     * pool's worker threads, then blocks until all of them have exited.
     * NOTE(review): despite the name this looks like the job-system root
     * thread rather than a test — confirm. */
    struct arena_temp scratch = scratch_begin_no_conflict();
    (UNUSED)_;
    /* Start scheduler (seed the cycle period so readers see a sane value
     * before the scheduler has measured a real one) */
    atomic64_fetch_set(&G.current_scheduler_cycle_period_ns.v, DEFAULT_SCHEDULER_CYCLE_PERIOD_NS);
    struct sys_thread *scheduler_thread = sys_thread_alloc(job_scheduler_entry, 0, LIT("Scheduler thread"), PROF_THREAD_GROUP_SCHEDULER);
    /* Start workers */
    /* TODO: Heuristic worker counts & affinities */
    for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(G.job_pools); ++pool_kind) {
        struct job_pool *pool = &G.job_pools[pool_kind];
        struct string name_fmt = ZI;
        /* Each pool gets its own profiler group band, spaced MEBI(1) apart so
         * per-worker ids (prof_group + i) cannot collide across pools */
        i32 prof_group = PROF_THREAD_GROUP_FIBERS - MEBI(pool_kind);
        /* Per-pool worker count, core affinity mask (masks are disjoint
         * except for the floating pool, which spans all 12 cores used) and
         * thread priority */
        switch (pool_kind) {
        default: ASSERT(0); break;
        case SYS_POOL_SIM:
        {
            name_fmt = LIT("Sim worker #%F");
            pool->num_worker_threads = 4;
            pool->thread_affinity_mask = 0x000000000000000Full; /* cores 0-3 */
            pool->thread_priority = THREAD_PRIORITY_TIME_CRITICAL;
        } break;
        case SYS_POOL_USER:
        {
            name_fmt = LIT("User worker #%F");
            pool->num_worker_threads = 4;
            pool->thread_affinity_mask = 0x00000000000000F0ull; /* cores 4-7 */
            pool->thread_priority = THREAD_PRIORITY_TIME_CRITICAL;
        } break;
        case SYS_POOL_AUDIO:
        {
            name_fmt = LIT("Audio worker #%F");
            pool->num_worker_threads = 2;
            pool->thread_affinity_mask = 0x0000000000000300ull; /* cores 8-9 */
            pool->thread_priority = THREAD_PRIORITY_TIME_CRITICAL;
            pool->thread_is_audio = 1;
        } break;
        case SYS_POOL_BACKGROUND:
        {
            name_fmt = LIT("Background worker #%F");
            pool->num_worker_threads = 2;
            pool->thread_affinity_mask = 0x0000000000000C00ull; /* cores 10-11 */
        } break;
        case SYS_POOL_FLOATING:
        {
            name_fmt = LIT("Floating worker #%F");
            pool->num_worker_threads = 8;
            pool->thread_affinity_mask = 0x0000000000000FFFull; /* cores 0-11 */
        } break;
        }
        pool->worker_threads_arena = arena_alloc(GIBI(64));
        pool->worker_threads = arena_push_array(pool->worker_threads_arena, struct sys_thread *, pool->num_worker_threads);
        pool->worker_contexts = arena_push_array(pool->worker_threads_arena, struct worker_ctx, pool->num_worker_threads);
        for (i32 i = 0; i < pool->num_worker_threads; ++i) {
            struct worker_ctx *ctx = &pool->worker_contexts[i];
            ctx->pool_kind = pool_kind;
            ctx->id = i;
            struct string name = string_format(scratch.arena, name_fmt, FMT_SINT(i));
            pool->worker_threads[i] = sys_thread_alloc(job_worker_entry, ctx, name, prof_group + i);
        }
    }
    /* Wait on workers */
    for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(G.job_pools); ++pool_kind) {
        struct job_pool *pool = &G.job_pools[pool_kind];
        for (i32 i = 0; i < pool->num_worker_threads; ++i) {
            struct sys_thread *worker_thread = pool->worker_threads[i];
            sys_thread_wait_release(worker_thread);
        }
    }
    /* Wait on scheduler */
    sys_thread_wait_release(scheduler_thread);
    scratch_end(scratch);
}
/* ========================== *
* Scratch context
* ========================== */
struct sys_scratch_ctx *sys_scratch_ctx_from_fiber_id(i16 id)
{
    /* Fetch the fiber's scratch context, lazily allocating its arenas on the
     * first request (arenas[0] doubles as the "initialized" flag). */
    struct sys_scratch_ctx *ctx = &fiber_from_id(id)->scratch_ctx;
    if (!ctx->arenas[0]) {
        __profn("Initialize scratch context");
        for (u32 arena_index = 0; arena_index < countof(ctx->arenas); ++arena_index) {
            ctx->arenas[arena_index] = arena_alloc(GIBI(64));
        }
    }
    return ctx;
}
/* ========================== *
* Events
* ========================== */
INTERNAL void win32_init_vk_btn_table(void)
{
    /* Build the Win32 virtual-key -> engine button lookup table. Unlisted
     * virtual keys stay zeroed (presumably mapping to SYS_BTN_NONE — TODO
     * confirm SYS_BTN_NONE == 0). */
    MEMZERO_ARRAY(G.vk_btn_table);
    /* Contiguous ranges: letters, digits and function keys line up 1:1 with
     * the corresponding SYS_BTN_* runs */
    for (u32 i = 'A', j = SYS_BTN_A; i <= 'Z'; ++i, ++j) {
        G.vk_btn_table[i] = (enum sys_btn)j;
    }
    for (u32 i = '0', j = SYS_BTN_0; i <= '9'; ++i, ++j) {
        G.vk_btn_table[i] = (enum sys_btn)j;
    }
    for (u32 i = VK_F1, j = SYS_BTN_F1; i <= VK_F24; ++i, ++j) {
        G.vk_btn_table[i] = (enum sys_btn)j;
    }
    /* Individual keys with no contiguous VK range */
    G.vk_btn_table[VK_ESCAPE] = SYS_BTN_ESC;
    G.vk_btn_table[VK_OEM_3] = SYS_BTN_GRAVE_ACCENT;
    G.vk_btn_table[VK_OEM_MINUS] = SYS_BTN_MINUS;
    G.vk_btn_table[VK_OEM_PLUS] = SYS_BTN_EQUAL;
    G.vk_btn_table[VK_BACK] = SYS_BTN_BACKSPACE;
    G.vk_btn_table[VK_TAB] = SYS_BTN_TAB;
    G.vk_btn_table[VK_SPACE] = SYS_BTN_SPACE;
    G.vk_btn_table[VK_RETURN] = SYS_BTN_ENTER;
    G.vk_btn_table[VK_CONTROL] = SYS_BTN_CTRL;
    G.vk_btn_table[VK_SHIFT] = SYS_BTN_SHIFT;
    G.vk_btn_table[VK_MENU] = SYS_BTN_ALT;
    G.vk_btn_table[VK_UP] = SYS_BTN_UP;
    G.vk_btn_table[VK_LEFT] = SYS_BTN_LEFT;
    G.vk_btn_table[VK_DOWN] = SYS_BTN_DOWN;
    G.vk_btn_table[VK_RIGHT] = SYS_BTN_RIGHT;
    G.vk_btn_table[VK_DELETE] = SYS_BTN_DELETE;
    G.vk_btn_table[VK_PRIOR] = SYS_BTN_PAGE_UP;
    G.vk_btn_table[VK_NEXT] = SYS_BTN_PAGE_DOWN;
    G.vk_btn_table[VK_HOME] = SYS_BTN_HOME;
    G.vk_btn_table[VK_END] = SYS_BTN_END;
    G.vk_btn_table[VK_OEM_2] = SYS_BTN_FORWARD_SLASH;
    G.vk_btn_table[VK_OEM_PERIOD] = SYS_BTN_PERIOD;
    G.vk_btn_table[VK_OEM_COMMA] = SYS_BTN_COMMA;
    G.vk_btn_table[VK_OEM_7] = SYS_BTN_QUOTE;
    G.vk_btn_table[VK_OEM_4] = SYS_BTN_LEFT_BRACKET;
    G.vk_btn_table[VK_OEM_6] = SYS_BTN_RIGHT_BRACKET;
    G.vk_btn_table[VK_INSERT] = SYS_BTN_INSERT;
    G.vk_btn_table[VK_OEM_1] = SYS_BTN_SEMICOLON;
}
/* ========================== *
* Memory
* ========================== */
void *sys_memory_reserve(u64 size)
{
    /* Reserve address space only; pages are committed separately on demand */
    return VirtualAlloc(0, size, MEM_RESERVE, PAGE_NOACCESS);
}
void sys_memory_release(void *address)
{
VirtualFree(address, 0, MEM_RELEASE);
}
void *sys_memory_commit(void *address, u64 size)
{
    /* Commit previously reserved pages as read/write */
    return VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
}
void sys_memory_decommit(void *address, u64 size)
{
    /* Return physical pages to the OS but keep the address range reserved */
    (UNUSED)VirtualFree(address, size, MEM_DECOMMIT);
}
void sys_memory_set_committed_readonly(void *address, u64 size)
{
    /* VirtualProtect demands somewhere to store the previous protection even
     * though we do not use it */
    DWORD old_protect;
    VirtualProtect(address, size, PAGE_READONLY, &old_protect);
}
void sys_memory_set_committed_readwrite(void *address, u64 size)
{
    /* VirtualProtect demands somewhere to store the previous protection even
     * though we do not use it */
    DWORD old_protect;
    VirtualProtect(address, size, PAGE_READWRITE, &old_protect);
}
/* ========================== *
* Time
* ========================== */
INTERNAL struct sys_datetime win32_time_to_sys_time(SYSTEMTIME st)
{
    /* Field-for-field translation of a Win32 SYSTEMTIME into our datetime */
    struct sys_datetime dt = ZI;
    dt.year = st.wYear;
    dt.month = st.wMonth;
    dt.day_of_week = st.wDayOfWeek;
    dt.day = st.wDay;
    dt.hour = st.wHour;
    dt.minute = st.wMinute;
    dt.second = st.wSecond;
    dt.milliseconds = st.wMilliseconds;
    return dt;
}
struct sys_datetime sys_local_time(void)
{
SYSTEMTIME lt;
GetLocalTime(&lt);
return win32_time_to_sys_time(lt);
}
i64 sys_time_ns(void)
{
    /* Monotonic nanoseconds elapsed since the process timer epoch
     * (G.timer_start_qpc).
     * NOTE(review): assumes G.ns_per_qpc is an exact integer ns-per-tick
     * factor; on QPC frequencies that do not evenly divide 1e9 this multiply
     * loses precision — confirm how ns_per_qpc is derived. */
    LARGE_INTEGER qpc;
    QueryPerformanceCounter(&qpc);
    i64 res = (qpc.QuadPart - G.timer_start_qpc) * G.ns_per_qpc;
    return res;
}
/* ========================== *
* File system
* ========================== */
INTERNAL struct string string_from_win32_path(struct arena *arena, wchar_t *src)
{
    /* Convert a NUL-terminated UTF-16 Win32 path into a UTF-8 string pushed
     * onto `arena`, replacing '\\' separators with '/'. Relies on successive
     * arena pushes being contiguous: res.text marks the start and every
     * encoded codepoint is appended immediately after it. */
    struct string res = {
        .len = 0,
        /* presumably yields the current arena position without advancing it
         * — TODO confirm arena_push_dry semantics */
        .text = arena_push_dry(arena, u8)
    };
    while (*src) {
        /* Offer up to two UTF-16 units so surrogate pairs decode correctly;
         * decoded.advance16 tells us how many were consumed */
        struct string16 decode_str = { .len = *(src + 1) ? 2 : 1, .text = src };
        struct uni_decode_utf16_result decoded = uni_decode_utf16(decode_str);
        struct uni_encode_utf8_result encoded = uni_encode_utf8(decoded.codepoint);
        u8 *dest = arena_push_array_no_zero(arena, u8, encoded.count8);
        for (u32 i = 0; i < encoded.count8; ++i) {
            u8 byte = encoded.chars8[i];
            /* Normalize Windows path separators to forward slashes */
            if (byte == '\\') {
                byte = '/';
            }
            dest[i] = byte;
        }
        res.len += encoded.count8;
        src += decoded.advance16;
    }
    return res;
}
struct string sys_get_write_path(struct arena *arena)
{
    /* Resolve the local app-data folder as our writable path; empty string on
     * failure. TODO: cache this? */
    struct string path = ZI;
    u16 *folder_wstr = 0;
    HRESULT hr = SHGetKnownFolderPath(
        &FOLDERID_LocalAppData,
        0,
        0,
        &folder_wstr
    );
    if (hr == S_OK) {
        path = string_from_win32_path(arena, folder_wstr);
    }
    /* The API contract requires freeing the buffer even on failure */
    CoTaskMemFree(folder_wstr);
    return path;
}
b32 sys_is_file(struct string path)
{
    __prof;
    /* A path names a file when it exists and is not a directory */
    struct arena_temp scratch = scratch_begin_no_conflict();
    DWORD attrs = GetFileAttributesW(wstr_from_string(scratch.arena, path));
    scratch_end(scratch);
    b32 exists = attrs != INVALID_FILE_ATTRIBUTES;
    b32 is_dir = (attrs & FILE_ATTRIBUTE_DIRECTORY) != 0;
    return exists && !is_dir;
}
b32 sys_is_dir(struct string path)
{
    /* A path names a directory when it exists with the directory attribute */
    struct arena_temp scratch = scratch_begin_no_conflict();
    DWORD attrs = GetFileAttributesW(wstr_from_string(scratch.arena, path));
    scratch_end(scratch);
    b32 exists = attrs != INVALID_FILE_ATTRIBUTES;
    b32 is_dir = (attrs & FILE_ATTRIBUTE_DIRECTORY) != 0;
    return exists && is_dir;
}
void sys_mkdir(struct string path)
{
    __prof;
    /* Create a directory (including intermediate components) and panic with a
     * readable message on the failure codes we recognize. Other codes —
     * including "already exists" — are silently ignored. */
    struct arena_temp scratch = scratch_begin_no_conflict();
    wchar_t *path_wstr = wstr_from_string(scratch.arena, path);
    int err_code = SHCreateDirectory(0, path_wstr);
    struct string err = ZI;
    if (err_code == ERROR_BAD_PATHNAME) {
        err = LIT("Bad path name");
    } else if (err_code == ERROR_FILENAME_EXCED_RANGE) {
        err = LIT("Path name too long");
    } else if (err_code == ERROR_FILE_EXISTS) {
        err = LIT("A file already exists at this location");
    } else if (err_code == ERROR_CANCELLED) {
        err = LIT("User canceled the operation");
    }
    if (err.len > 0) {
        struct string msg = string_format(scratch.arena,
                LIT("Failed to create directory \"%F\": %F"),
                FMT_STR(path),
                FMT_STR(err));
        sys_panic(msg);
    }
    scratch_end(scratch);
}
struct sys_file sys_file_open_read(struct string path)
{
__prof;
struct arena_temp scratch = scratch_begin_no_conflict();
struct sys_file file = ZI;
wchar_t *path_wstr = wstr_from_string(scratch.arena, path);
HANDLE handle = CreateFileW(
path_wstr,
GENERIC_READ,
FILE_SHARE_READ,
0,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
0
);
file.handle = (u64)handle;
file.valid = handle != INVALID_HANDLE_VALUE;
scratch_end(scratch);
return file;
}
struct sys_file sys_file_open_read_wait(struct string path)
{
__prof;
struct arena_temp scratch = scratch_begin_no_conflict();
struct sys_file file = ZI;
wchar_t *path_wstr = wstr_from_string(scratch.arena, path);
i32 delay_ms = 1;
HANDLE handle;
while ((handle = CreateFileW(path_wstr, GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0)) == INVALID_HANDLE_VALUE) {
if (GetLastError() == ERROR_SHARING_VIOLATION) {
__profn("File share conflict delay");
Sleep(delay_ms);
if (delay_ms < 1024) {
delay_ms *= 2;
}
} else {
break;
}
}
file.handle = (u64)handle;
file.valid = handle != INVALID_HANDLE_VALUE;
scratch_end(scratch);
return file;
}
struct sys_file sys_file_open_write(struct string path)
{
__prof;
struct arena_temp scratch = scratch_begin_no_conflict();
struct sys_file file = ZI;
wchar_t *path_wstr = wstr_from_string(scratch.arena, path);
HANDLE handle = CreateFileW(
path_wstr,
GENERIC_WRITE,
0,
0,
CREATE_ALWAYS,
FILE_ATTRIBUTE_NORMAL,
0
);
file.handle = (u64)handle;
file.valid = handle != INVALID_HANDLE_VALUE;
scratch_end(scratch);
return file;
}
struct sys_file sys_file_open_append(struct string path)
{
__prof;
struct arena_temp scratch = scratch_begin_no_conflict();
struct sys_file file = ZI;
wchar_t *path_wstr = wstr_from_string(scratch.arena, path);
HANDLE handle = CreateFileW(
path_wstr,
FILE_APPEND_DATA,
FILE_SHARE_READ,
0,
OPEN_ALWAYS,
FILE_ATTRIBUTE_NORMAL,
0
);
file.handle = (u64)handle;
file.valid = handle != INVALID_HANDLE_VALUE;
scratch_end(scratch);
return file;
}
void sys_file_close(struct sys_file file)
{
__prof;
if (file.handle) {
CloseHandle((HANDLE)file.handle);
}
}
struct string sys_file_read_all(struct arena *arena, struct sys_file file)
{
__prof;
i64 size = 0;
GetFileSizeEx((HANDLE)file.handle, (PLARGE_INTEGER)&size);
struct string s = {
.len = size,
.text = 0
};
if (size > 0) {
/* ReadFile returns non-zero on success */
/* TODO: error checking */
arena_align(arena, 16);
s.text = arena_push_array_no_zero(arena, u8, size);
(UNUSED)ReadFile(
(HANDLE)file.handle,
s.text,
(DWORD)s.len,
0, /* lpNumberOfBytesRead */
0
);
}
return s;
}
void sys_file_write(struct sys_file file, struct string data)
{
__prof;
/* TODO: Check what the real data limit is and chunk sequentially based on
* that (rather than failing) */
if (data.len >= 0x7FFF) {
struct arena_temp scratch = scratch_begin_no_conflict();
sys_panic(string_format(scratch.arena,
LIT("Tried to write too many bytes to disk (%F)"),
FMT_UINT(data.len)));
scratch_end(scratch);
}
/* WriteFile returns TRUE on success */
(UNUSED)WriteFile(
(HANDLE)file.handle,
data.text,
(DWORD)data.len,
0, /* lpNumberOfBytesWritten */
0
);
}
u64 sys_file_get_size(struct sys_file file)
{
LARGE_INTEGER li_file_size;
GetFileSizeEx((HANDLE)file.handle, &li_file_size);
return (u64)(li_file_size.QuadPart > 0 ? li_file_size.QuadPart : 0);
}
struct sys_file_time sys_file_get_time(struct sys_file file)
{
__prof;
/* Get file times */
FILETIME ft_created;
FILETIME ft_accessed;
FILETIME ft_modified;
b32 success = !!GetFileTime((HANDLE)file.handle, &ft_created, &ft_accessed, &ft_modified);
if (success) {
/* Convert file times to local file time */
FileTimeToLocalFileTime(&ft_created, &ft_created);
FileTimeToLocalFileTime(&ft_accessed, &ft_accessed);
FileTimeToLocalFileTime(&ft_modified, &ft_modified);
/* Convert local file times to system times */
SYSTEMTIME st_created;
SYSTEMTIME st_accessed;
SYSTEMTIME st_modified;
FileTimeToSystemTime(&ft_created, &st_created);
FileTimeToSystemTime(&ft_accessed, &st_accessed);
FileTimeToSystemTime(&ft_modified, &st_modified);
return (struct sys_file_time) {
.created = win32_time_to_sys_time(st_created),
.accessed = win32_time_to_sys_time(st_accessed),
.modified = win32_time_to_sys_time(st_modified)
};
} else {
return (struct sys_file_time) { 0 };
}
}
/* ========================== *
* File map
* ========================== */
struct sys_file_map sys_file_map_open_read(struct sys_file file)
{
    __prof;
    /* Map an entire file read-only into the address space. `valid` reports
     * success; failure leaves handle 0 and an empty mapped_memory.
     * BUGFIX: CreateFileMappingW reports failure with NULL, not
     * INVALID_HANDLE_VALUE. The old check let a NULL handle fall through to
     * MapViewOfFile, then stored INVALID_HANDLE_VALUE into map.handle, which
     * sys_file_map_close would later pass to CloseHandle. */
    struct sys_file_map map = ZI;
    u64 size = sys_file_get_size(file);
    u8 *base_ptr = 0;
    HANDLE map_handle = 0;
    if (size > 0) {
        map_handle = CreateFileMappingW(
            (HANDLE)file.handle,
            0,
            PAGE_READONLY,
            0,
            0,
            0
        );
        if (map_handle != 0) {
            base_ptr = MapViewOfFile(
                map_handle,
                FILE_MAP_READ,
                0,
                0,
                0
            );
            if (base_ptr == 0) {
                /* Failed to create view */
                CloseHandle(map_handle);
                map_handle = 0;
            }
        }
    }
    if (map_handle == 0) {
        size = 0;
    }
    map.handle = (u64)map_handle;
    map.mapped_memory = STRING(size, base_ptr);
    map.valid = map_handle != 0 && base_ptr != 0;
    return map;
}
void sys_file_map_close(struct sys_file_map map)
{
if (map.mapped_memory.text) {
UnmapViewOfFile(map.mapped_memory.text);
}
if (map.handle) {
CloseHandle((HANDLE)map.handle);
}
}
struct string sys_file_map_data(struct sys_file_map map)
{
    /* The mapped bytes; empty when the mapping failed */
    return map.mapped_memory;
}
/* ========================== *
* File iter
* ========================== */
/* Per-iterator Win32 state backing sys_file_filter */
struct win32_file_filter {
    HANDLE find_handle;   /* 0 until the first next() call, then the FindFirstFileExW result */
    wchar_t *filter_wstr; /* UTF-16 search pattern passed to FindFirstFileExW */
};
struct sys_file_filter sys_file_filter_begin(struct arena *arena, struct string pattern)
{
struct sys_file_filter filter = ZI;
struct win32_file_filter *filter_internal = arena_push(arena, struct win32_file_filter);
filter_internal->filter_wstr = wstr_from_string(arena, pattern);
filter.handle = (u64)filter_internal;
return filter;
}
b32 sys_file_filter_next(struct arena *arena, struct sys_file_filter *filter)
{
    /* Advance to the next match, skipping the "." and ".." pseudo entries.
     * BUGFIX: a failed FindFirstFileExW left find_handle equal to
     * INVALID_HANDLE_VALUE (non-zero), so any subsequent call would hand the
     * invalid handle to FindNextFileW. Also replaces the recursive skip with
     * a loop. */
    struct win32_file_filter *filter_internal = (struct win32_file_filter *)filter->handle;
    b32 found = 0;
    for (;;) {
        WIN32_FIND_DATAW find_file_data = ZI;
        if (filter_internal->find_handle == INVALID_HANDLE_VALUE) {
            /* Initial search already failed; stay exhausted */
            found = 0;
            break;
        } else if (filter_internal->find_handle) {
            found = FindNextFileW(filter_internal->find_handle, &find_file_data);
        } else {
            filter_internal->find_handle = FindFirstFileExW(filter_internal->filter_wstr, FindExInfoStandard, &find_file_data, FindExSearchNameMatch, 0, FIND_FIRST_EX_CASE_SENSITIVE | FIND_FIRST_EX_LARGE_FETCH);
            found = filter_internal->find_handle != INVALID_HANDLE_VALUE;
        }
        if (!found) {
            break;
        }
        struct string file_name = string_from_wstr_no_limit(arena, find_file_data.cFileName);
        if (string_eq(file_name, LIT(".")) || string_eq(file_name, LIT(".."))) {
            /* Skip '.' and '..' matches */
            continue;
        }
        filter->info.is_dir = find_file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY;
        filter->info.filename = string_copy(arena, file_name);
        break;
    }
    return found;
}
void sys_file_filter_end(struct sys_file_filter *filter)
{
struct win32_file_filter *filter_internal = (struct win32_file_filter *)filter->handle;
if (filter_internal->find_handle) {
FindClose(filter_internal->find_handle);
}
}
/* ========================== *
* Watch
* ========================== */
/* Per-watch Win32 state backing sys_watch */
struct win32_watch {
    HANDLE dir_handle;             /* directory opened FILE_FLAG_OVERLAPPED for ReadDirectoryChangesW */
    HANDLE wake_handle;            /* event signaled by sys_watch_wake to abort a wait early */
    struct win32_watch *next_free; /* free-list link (G.watches_first_free) */
    u8 results_buff[KIBI(64)];     /* output buffer filled by ReadDirectoryChangesW */
};
struct sys_watch *sys_watch_alloc(struct string dir_path)
{
    /* Allocate a directory watch (from the global free list, growing the
     * arena when empty) and open `dir_path` for asynchronous change
     * notification. */
    struct arena_temp scratch = scratch_begin_no_conflict();
    struct win32_watch *w32_watch = 0;
    {
        struct snc_lock lock = snc_lock_e(&G.watches_mutex);
        {
            if (G.watches_first_free) {
                w32_watch = G.watches_first_free;
                G.watches_first_free = w32_watch->next_free;
            } else {
                w32_watch = arena_push_no_zero(G.watches_arena, struct win32_watch);
            }
        }
        snc_unlock(&lock);
    }
    MEMZERO_STRUCT(w32_watch);
    wchar_t *dir_path_wstr = wstr_from_string(scratch.arena, dir_path);
    /* FILE_FLAG_BACKUP_SEMANTICS is required to open a directory handle;
     * FILE_FLAG_OVERLAPPED enables the async ReadDirectoryChangesW used by
     * sys_watch_wait.
     * NOTE(review): the handle is not checked against INVALID_HANDLE_VALUE —
     * a bad dir_path only surfaces later in sys_watch_wait. */
    w32_watch->dir_handle = CreateFileW(
        dir_path_wstr,
        FILE_LIST_DIRECTORY,
        FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
        0,
        OPEN_EXISTING,
        FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED,
        0
    );
    w32_watch->wake_handle = CreateEventW(0, 0, 0, 0);
    scratch_end(scratch);
    return (struct sys_watch *)w32_watch;
}
void sys_watch_release(struct sys_watch *dw)
{
    /* Close the OS handles, then push the slot back on the global free list */
    struct win32_watch *w32_watch = (struct win32_watch *)dw;
    CloseHandle(w32_watch->dir_handle);
    CloseHandle(w32_watch->wake_handle);
    {
        struct snc_lock lock = snc_lock_e(&G.watches_mutex);
        w32_watch->next_free = G.watches_first_free;
        G.watches_first_free = w32_watch;
        snc_unlock(&lock);
    }
}
struct sys_watch_info_list sys_watch_wait(struct arena *arena, struct sys_watch *dw)
{
    __prof;
    /* Block until the watched directory changes (or sys_watch_wake fires)
     * and return the change records pushed onto `arena`. An empty list means
     * the wait was woken rather than signaled by a change. */
    struct win32_watch *w32_watch = (struct win32_watch *)dw;
    struct sys_watch_info_list list = ZI;
    DWORD filter = FILE_NOTIFY_CHANGE_FILE_NAME |
        FILE_NOTIFY_CHANGE_DIR_NAME |
        FILE_NOTIFY_CHANGE_LAST_WRITE |
        FILE_NOTIFY_CHANGE_CREATION;
    b32 done = 0;
    while (!done) {
        OVERLAPPED ov = ZI;
        ov.hEvent = CreateEventW(0, 0, 0, 0);
        ASSERT(ov.hEvent);
        BOOL success = ReadDirectoryChangesW(w32_watch->dir_handle,
                w32_watch->results_buff,
                countof(w32_watch->results_buff),
                1,
                filter,
                0,
                &ov,
                0);
        (UNUSED)success;
        ASSERT(success);
        HANDLE handles[] = {
            ov.hEvent,
            w32_watch->wake_handle
        };
        DWORD wait_res = WaitForMultipleObjects(2, handles, 0, INFINITE);
        if (wait_res == WAIT_OBJECT_0) {
            /* Walk the packed FILE_NOTIFY_INFORMATION records */
            i64 offset = 0;
            while (!done) {
                FILE_NOTIFY_INFORMATION *res = (FILE_NOTIFY_INFORMATION *)(w32_watch->results_buff + offset);
                struct sys_watch_info *info = arena_push(arena, struct sys_watch_info);
                if (list.last) {
                    list.last->next = info;
                    info->prev = list.last;
                } else {
                    list.first = info;
                }
                list.last = info;
                ++list.count;
                /* FileName is UTF-16, not NUL terminated; length is in bytes */
                struct string16 name16 = ZI;
                name16.text = res->FileName;
                name16.len = res->FileNameLength / sizeof(wchar_t);
                info->name = string_from_string16(arena, name16);
                for (u64 i = 0; i < info->name.len; ++i) {
                    if (info->name.text[i] == '\\') {
                        info->name.text[i] = '/';
                    }
                }
                /* NOTE(review): the rename mappings below look shifted by one
                 * case (RENAMED_OLD_NAME -> REMOVED, RENAMED_NEW_NAME ->
                 * RENAMED_OLD, default -> RENAMED_NEW). Preserved as-is —
                 * confirm the intent. */
                switch (res->Action) {
                case FILE_ACTION_ADDED:
                {
                    info->kind = SYS_WATCH_INFO_KIND_ADDED;
                } break;
                case FILE_ACTION_REMOVED:
                {
                    info->kind = SYS_WATCH_INFO_KIND_REMOVED;
                } break;
                case FILE_ACTION_MODIFIED:
                {
                    info->kind = SYS_WATCH_INFO_KIND_MODIFIED;
                } break;
                case FILE_ACTION_RENAMED_OLD_NAME:
                {
                    info->kind = SYS_WATCH_INFO_KIND_REMOVED;
                } break;
                case FILE_ACTION_RENAMED_NEW_NAME:
                {
                    info->kind = SYS_WATCH_INFO_KIND_RENAMED_OLD;
                } break;
                default:
                {
                    info->kind = SYS_WATCH_INFO_KIND_RENAMED_NEW;
                } break;
                }
                if (res->NextEntryOffset == 0) {
                    done = 1;
                } else {
                    offset += res->NextEntryOffset;
                }
            }
        } else if (wait_res == WAIT_OBJECT_0 + 1) {
            ResetEvent(w32_watch->wake_handle);
            /* BUGFIX: cancel the in-flight ReadDirectoryChangesW and drain
             * its completion before `ov` (stack memory) goes out of scope;
             * otherwise the kernel could complete the I/O into a dead stack
             * frame after we return. */
            CancelIoEx(w32_watch->dir_handle, &ov);
            DWORD cancel_bytes = 0;
            GetOverlappedResult(w32_watch->dir_handle, &ov, &cancel_bytes, TRUE);
            done = 1;
        } else {
            ASSERT(0);
        }
        /* BUGFIX: the per-iteration event handle was previously leaked */
        CloseHandle(ov.hEvent);
    }
    return list;
}
void sys_watch_wake(struct sys_watch *dw)
{
    /* Signal the wake event so a blocked sys_watch_wait returns early */
    SetEvent(((struct win32_watch *)dw)->wake_handle);
}
struct sys_watch_info_list sys_watch_info_copy(struct arena *arena, struct sys_watch_info_list src_list)
{
    /* Deep-copy a watch info list (kind + name strings) into `arena` */
    struct sys_watch_info_list dst_list = ZI;
    for (struct sys_watch_info *src = src_list.first; src; src = src->next) {
        struct sys_watch_info *node = arena_push(arena, struct sys_watch_info);
        node->kind = src->kind;
        node->name = string_copy(arena, src->name);
        if (!dst_list.first) {
            dst_list.first = node;
        } else {
            dst_list.last->next = node;
            node->prev = dst_list.last;
        }
        dst_list.last = node;
        /* Mirror the source count (stays 0 for an empty source list) */
        dst_list.count = src_list.count;
    }
    return dst_list;
}
/* ========================== *
* Window
* ========================== */
INTERNAL void win32_update_window_from_system(struct win32_window *window);
INTERNAL void win32_window_wake(struct win32_window *window);
INTERNAL void win32_window_process_event(struct win32_window *window, struct sys_event event)
{
__prof;
struct snc_lock lock = snc_lock_e(&window->event_arena_swp_mutex);
{
*arena_push(window->event_arenas[window->current_event_arena_index], struct sys_event) = event;
}
snc_unlock(&lock);
}
INTERNAL HWND win32_window_init(struct win32_window *window)
{
    /* Create the Win32 window for `window` and store the struct pointer as
     * HWND userdata so win32_window_proc can recover it. Must run on the
     * thread that will pump this window's messages (see window_thread). */
    /*
     * From martins (https://gist.github.com/mmozeiko/5e727f845db182d468a34d524508ad5f#file-win32_d3d11-c-L66-L70):
     * WS_EX_NOREDIRECTIONBITMAP flag here is needed to fix ugly bug with Windows 10
     * when window is resized and DXGI swap chain uses FLIP presentation model
     * DO NOT use it if you choose to use non-FLIP presentation model
     * read about the bug here: https://stackoverflow.com/q/63096226 and here: https://stackoverflow.com/q/53000291
     */
    DWORD exstyle = WS_EX_APPWINDOW | WS_EX_NOREDIRECTIONBITMAP;
    /* TODO: Check for hwnd success */
    HWND hwnd = CreateWindowExW(
        exstyle,
        G.window_class.lpszClassName,
        L"",
        WS_OVERLAPPEDWINDOW,
        CW_USEDEFAULT,
        CW_USEDEFAULT,
        CW_USEDEFAULT,
        CW_USEDEFAULT,
        0,
        0,
        G.window_class.hInstance,
        0
    );
    /* Dark mode title bar */
    BOOL dark_mode = 1;
    DwmSetWindowAttribute(hwnd, DWMWA_USE_IMMERSIVE_DARK_MODE, (LPCVOID)&dark_mode, sizeof(dark_mode));
    /* Set window as userdata */
    SetWindowLongPtrW(hwnd, GWLP_USERDATA, (LONG_PTR)window);
    return hwnd;
}
INTERNAL SYS_THREAD_DEF(window_thread, arg)
{
    /* Dedicated message-pump thread for one window. Creates the HWND here
     * (Win32 delivers a window's messages to its creating thread), signals
     * ready_fence once the HWND exists, then pumps messages until shutdown. */
    struct win32_window *window = (struct win32_window *)arg;
    /* Win32 limitation: Window must be initialized on same thread that processes events */
    window->hwnd = win32_window_init(window);
    window->tid = sys_thread_id();
    win32_update_window_from_system(window);
    BringWindowToTop(window->hwnd);
    /* Unblock win32_window_alloc, which waits for the HWND to be valid */
    snc_counter_add(&window->ready_fence, -1);
    while (!atomic32_fetch(&window->shutdown)) {
        MSG msg = ZI;
        {
            /* Blocks until a message arrives; win32_window_wake posts a
             * no-op message to break out for shutdown */
            GetMessageW(&msg, 0, 0, 0);
        }
        {
            __profn("Process window message");
            if (!atomic32_fetch(&window->shutdown)) {
                TranslateMessage(&msg);
                DispatchMessageW(&msg);
            }
        }
    }
    /* Destroy window hwnd */
    DestroyWindow(window->hwnd);
}
INTERNAL struct win32_window *win32_window_alloc(void)
{
    /* Allocate a window struct (free list first, arena otherwise), create
     * its double-buffered event arenas, and start its dedicated message-pump
     * thread. Blocks until that thread has produced a valid HWND. */
    struct win32_window *window = 0;
    {
        struct snc_lock lock = snc_lock_e(&G.windows_mutex);
        if (G.first_free_window) {
            window = G.first_free_window;
            G.first_free_window = window->next_free;
        } else {
            window = arena_push_no_zero(G.windows_arena, struct win32_window);
        }
        snc_unlock(&lock);
    }
    MEMZERO_STRUCT(window);
    /* Double-buffered event storage, swapped by sys_window_pop_events */
    window->event_arenas[0] = arena_alloc(GIBI(64));
    window->event_arenas[1] = arena_alloc(GIBI(64));
    /* Start window event thread */
    /* NOTE: This thread must finish building for the window to actually be
     * created and receive a HWND, because on Windows the event proc must run
     * on the same thread that created the window. */
    snc_counter_add(&window->ready_fence, 1);
    window->window_thread = sys_thread_alloc(&window_thread, window, LIT("Win32 window thread"), PROF_THREAD_GROUP_WINDOW);
    snc_counter_wait(&window->ready_fence);
    return window;
}
INTERNAL void win32_window_release(struct win32_window *window)
{
    /* Request shutdown, kick the message loop awake, join the window thread,
     * then return the window struct to the global free list. */
    atomic32_fetch_set(&window->shutdown, 1);
    win32_window_wake(window);
    sys_thread_wait_release(window->window_thread);
    {
        struct snc_lock lock = snc_lock_e(&G.windows_mutex);
        window->next_free = G.first_free_window;
        G.first_free_window = window;
        snc_unlock(&lock);
    }
}
struct sys_event_array sys_window_pop_events(struct arena *arena, struct sys_window *sys_window)
{
    __prof;
    struct win32_window *window = (struct win32_window *)sys_window;
    /* Swap the double-buffered event arenas under the lock, then drain the
     * retired buffer into `arena` without holding the lock. */
    i32 retired_index = 0;
    {
        struct snc_lock lock = snc_lock_e(&window->event_arena_swp_mutex);
        retired_index = window->current_event_arena_index;
        window->current_event_arena_index = 1 - retired_index;
        snc_unlock(&lock);
    }
    struct arena *events_arena = window->event_arenas[retired_index];
    struct sys_event_array events = ZI;
    events.count = events_arena->pos / sizeof(struct sys_event);
    events.events = arena_push_array_no_zero(arena, struct sys_event, events.count);
    MEMCPY(events.events, arena_base(events_arena), events_arena->pos);
    arena_reset(events_arena);
    return events;
}
INTERNAL void win32_update_window_from_system(struct win32_window *window)
{
    /* Pull authoritative window state (monitor size, minimized/maximized
     * flags, client-area geometry) from Win32 back into our window struct.
     * Runs on the window thread in response to move/size messages and at
     * creation. */
    HWND hwnd = window->hwnd;
    RECT window_rect = ZI;
    GetWindowRect(hwnd, &window_rect);
    RECT client_rect = ZI;
    GetClientRect(hwnd, (LPRECT)&client_rect);
    /* Translate both client-rect corners into screen coordinates */
    ClientToScreen(hwnd, (LPPOINT)&client_rect.left);
    ClientToScreen(hwnd, (LPPOINT)&client_rect.right);
    /* TODO: Error if we can't get monitor info */
    /* Screen dimensions */
    MONITORINFO monitor_info = { .cbSize = sizeof(monitor_info) };
    GetMonitorInfo(MonitorFromWindow(hwnd, MONITOR_DEFAULTTOPRIMARY), &monitor_info);
    RECT monitor_rect = monitor_info.rcMonitor;
    window->monitor_width = monitor_rect.right - monitor_rect.left;
    window->monitor_height = monitor_rect.bottom - monitor_rect.top;
    /* Minimized / maximized (only meaningful while the window is shown) */
    if (window->flags & SYS_WINDOW_FLAG_SHOWING) {
        WINDOWPLACEMENT placement = { .length = sizeof(placement) };
        GetWindowPlacement(hwnd, &placement);
        if (placement.showCmd == SW_SHOWMINIMIZED) {
            window->settings.flags |= SYS_WINDOW_SETTINGS_FLAG_MINIMIZED;
        } else {
            window->settings.flags &= ~SYS_WINDOW_SETTINGS_FLAG_MINIMIZED;
        }
        /* A minimized window that will restore to maximized still counts as
         * maximized */
        if (placement.showCmd == SW_SHOWMAXIMIZED
            || ((window->settings.flags & SYS_WINDOW_SETTINGS_FLAG_MINIMIZED) && ((placement.flags & WPF_RESTORETOMAXIMIZED) != 0))) {
            window->settings.flags |= SYS_WINDOW_SETTINGS_FLAG_MAXIMIZED;
        } else {
            window->settings.flags &= ~SYS_WINDOW_SETTINGS_FLAG_MAXIMIZED;
        }
    }
    /* Window dimensions (client area, screen space) */
    i32 x = client_rect.left;
    i32 y = client_rect.top;
    i32 width = client_rect.right - client_rect.left;
    i32 height = client_rect.bottom - client_rect.top;
    /* Skip geometry updates while minimized: the client rect is degenerate */
    if (!(window->settings.flags & SYS_WINDOW_SETTINGS_FLAG_MINIMIZED)) {
        window->x = x;
        window->y = y;
        window->width = width;
        window->height = height;
        if (!(window->settings.flags & (SYS_WINDOW_SETTINGS_FLAG_MAXIMIZED | SYS_WINDOW_SETTINGS_FLAG_FULLSCREEN))) {
            /* Treat a window resize in non maximized/fullscreen mode as a
             * settings change.
             *
             * TODO: make sure we check for fullscreen here too if we ever
             * allow it. */
            window->settings.floating_x = x;
            window->settings.floating_y = y;
            window->settings.floating_width = width;
            window->settings.floating_height = height;
        }
    }
}
INTERNAL void win32_update_window_from_settings(struct win32_window *window, struct sys_window_settings *settings)
{
    /* Push desired settings (show state, fullscreen, floating geometry,
     * title) out to the Win32 window. Fullscreen is implemented as a
     * borderless WS_POPUP window covering the monitor. */
    HWND hwnd = window->hwnd;
    struct sys_window_settings old_settings = window->settings;
    window->settings = *settings;
    /* Pick the show command; hidden unless the window is flagged showing */
    i32 show_cmd = SW_HIDE;
    if (window->flags & SYS_WINDOW_FLAG_SHOWING) {
        show_cmd = SW_NORMAL;
        if (settings->flags & SYS_WINDOW_SETTINGS_FLAG_MAXIMIZED) {
            show_cmd = SW_SHOWMAXIMIZED;
        } else if (settings->flags & SYS_WINDOW_SETTINGS_FLAG_MINIMIZED) {
            show_cmd = SW_MINIMIZE;
        }
    }
    RECT rect = ZI;
    b32 old_fullscreen = old_settings.flags & SYS_WINDOW_SETTINGS_FLAG_FULLSCREEN;
    b32 fullscreen = settings->flags & SYS_WINDOW_SETTINGS_FLAG_FULLSCREEN;
    if (fullscreen) {
        if (!old_fullscreen) {
            /* Entering fullscreen: drop the caption/border */
            SetWindowLongPtrW(hwnd, GWL_STYLE, WS_POPUP);
        }
        rect = (RECT) {
            .left = 0,
            .top = 0,
            .right = window->monitor_width,
            .bottom = window->monitor_height
        };
    } else {
        if (old_fullscreen) {
            /* Leaving fullscreen: restore the normal frame */
            SetWindowLongPtrW(hwnd, GWL_STYLE, WS_OVERLAPPEDWINDOW);
        }
        rect = (RECT) {
            .left = settings->floating_x,
            .top = settings->floating_y,
            .right = settings->floating_x + settings->floating_width,
            .bottom = settings->floating_y + settings->floating_height
        };
        /* floating_* describe the client area; grow the rect to include the
         * frame */
        AdjustWindowRect(&rect, WS_OVERLAPPEDWINDOW, 0);
    }
    WINDOWPLACEMENT wp = {
        .length = sizeof(WINDOWPLACEMENT),
        .showCmd = show_cmd,
        .rcNormalPosition = rect
    };
    SetWindowPlacement(hwnd, &wp);
    /* Make window always on top when debugging */
    #if 0
    #if RTC
    SetWindowPos(hwnd, HWND_TOPMOST, 0, 0, 0, 0, SWP_NOMOVE | SWP_NOSIZE);
    #endif
    #endif
    {
        struct arena_temp scratch = scratch_begin_no_conflict();
        wchar_t *title_wstr = wstr_from_string(scratch.arena, string_from_cstr_no_limit(settings->title));
        SetWindowTextW(hwnd, title_wstr);
        scratch_end(scratch);
    }
}
INTERNAL void win32_window_wake(struct win32_window *window)
{
/* Post a blank message to the window's thread message queue to wake it. */
PostMessageW(window->hwnd, 0, 0, 0);
}
/* Window procedure for all power_play windows. First applies any deferred
 * cursor requests (queued via sys_window_cursor_* from other threads and
 * flushed here after a win32_window_wake), then translates system messages
 * into sys_event records via win32_window_process_event. */
INTERNAL LRESULT CALLBACK win32_window_proc(HWND hwnd, UINT msg, WPARAM wparam, LPARAM lparam)
{
	__prof;
	struct win32_window *window = (struct win32_window *)GetWindowLongPtrW(hwnd, GWLP_USERDATA);
	if (!window) {
		/* Messages arriving before the userdata pointer is installed get default handling */
		return DefWindowProcW(hwnd, msg, wparam, lparam);
	}
	/* Update cursor (only while this window has keyboard focus) */
	if (GetFocus() == window->hwnd) {
		u32 cursor_flags = window->cursor_set_flags;
		/* Hide cursor; ShowCursor keeps a display counter, so loop until it drops below zero */
		if (cursor_flags & WIN32_WINDOW_CURSOR_SET_FLAG_HIDE) {
			while (ShowCursor(0) >= 0);
		}
		/* Show cursor; loop until the display counter is non-negative again */
		if (cursor_flags & WIN32_WINDOW_CURSOR_SET_FLAG_SHOW) {
			while (ShowCursor(1) < 0);
		}
		/* Update position (requested position is in client/window space) */
		if (cursor_flags & WIN32_WINDOW_CURSOR_SET_FLAG_POSITION) {
			struct v2 window_space_pos = window->cursor_set_position;
			/* NOTE(review): implicit f32 -> LONG conversion truncates toward zero — confirm rounding is not wanted */
			POINT p = { window_space_pos.x, window_space_pos.y };
			ClientToScreen(window->hwnd, &p);
			SetCursorPos(p.x, p.y);
		}
		/* Stop clipping cursor */
		if (cursor_flags & WIN32_WINDOW_CURSOR_SET_FLAG_DISABLE_CLIP) {
			ClipCursor(0);
		}
		/* Clip cursor to the requested bounds, clamped to the window rect (screen coords) */
		if (cursor_flags & WIN32_WINDOW_CURSOR_SET_FLAG_ENABLE_CLIP) {
			i32 left = window->x + math_round_to_int(window->cursor_clip_bounds.x);
			i32 right = left + math_round_to_int(window->cursor_clip_bounds.width);
			i32 top = window->y + math_round_to_int(window->cursor_clip_bounds.y);
			i32 bottom = top + math_round_to_int(window->cursor_clip_bounds.height);
			RECT clip = {
				.left = clamp_i32(left, window->x, window->x + window->width),
				.right = clamp_i32(right, window->x, window->x + window->width),
				.top = clamp_i32(top, window->y, window->y + window->height),
				.bottom = clamp_i32(bottom, window->y, window->y + window->height)
			};
			ClipCursor(&clip);
		}
		/* All pending requests consumed */
		window->cursor_set_flags = 0;
	}
	LRESULT result = 0;
	b32 is_release = 0;
	switch (msg) {
	/* NOTE(review): WM_QUIT is posted to the thread, not a window, so it is
	 * not normally delivered here — presumably handled defensively. */
	case WM_QUIT:
	case WM_CLOSE:
	case WM_DESTROY: {
		win32_window_process_event(window, (struct sys_event) { .kind = SYS_EVENT_KIND_QUIT });
	} break;
	case WM_PAINT: {
		result = DefWindowProcW(hwnd, msg, wparam, lparam);
	} break;
	case WM_ENTERSIZEMOVE:
	case WM_MOVE:
	case WM_MOVING:
	case WM_SIZE:
	case WM_SIZING: {
		/* Mirror the new geometry into the window struct before default handling */
		win32_update_window_from_system(window);
		result = DefWindowProcW(hwnd, msg, wparam, lparam);
	} break;
	/* Keyboard buttons */
	case WM_SYSKEYUP:
	case WM_SYSKEYDOWN: {
		/* Forward system keys to DefWindowProc except bare Alt (VK_MENU),
		 * then fall through so they also generate button events */
		if (LOWORD(wparam) != VK_MENU) {
			result = DefWindowProcW(hwnd, msg, wparam, lparam);
		}
	} FALLTHROUGH;
	case WM_KEYUP:
	case WM_KEYDOWN: {
		WORD vk_code = LOWORD(wparam);
		b32 is_repeat = 0;
		enum sys_event_kind event_kind = SYS_EVENT_KIND_NONE;
		if (msg == WM_KEYDOWN || msg == WM_SYSKEYDOWN) {
			event_kind = SYS_EVENT_KIND_BUTTON_DOWN;
			/* Bit 30 of lparam: key was already down before this message */
			is_repeat = (lparam & 0x40000000) != 0;
		} else if (msg == WM_KEYUP || msg == WM_SYSKEYUP) {
			event_kind = SYS_EVENT_KIND_BUTTON_UP;
		}
		/* Translate virtual-key code to engine button via lookup table */
		enum sys_btn button = SYS_BTN_NONE;
		if (vk_code < countof(G.vk_btn_table)) {
			button = G.vk_btn_table[vk_code];
		}
		win32_window_process_event(
			window,
			(struct sys_event) {
				.kind = event_kind,
				.button = button,
				.is_repeat = is_repeat
			}
		);
	} break;
	/* Text */
	case WM_SYSCHAR:
	case WM_CHAR: {
		u16 utf16_char = (u32)wparam;
		/* Decode: surrogate pairs arrive as two WM_CHAR messages, so stash
		 * the high surrogate and combine when the low surrogate arrives */
		u32 codepoint = 0;
		if (uni_is_utf16_high_surrogate(utf16_char)) {
			window->utf16_high_surrogate_last_input = utf16_char;
		} else if (uni_is_utf16_low_surrogate(utf16_char)) {
			u16 high = window->utf16_high_surrogate_last_input;
			u16 low = utf16_char;
			if (high) {
				u16 utf16_pair_bytes[2] = { high, low };
				struct uni_decode_utf16_result decoded = uni_decode_utf16((struct string16) { .len = countof(utf16_pair_bytes), .text = utf16_pair_bytes });
				if (decoded.advance16 == 2 && decoded.codepoint < U32_MAX) {
					codepoint = decoded.codepoint;
				}
			}
			window->utf16_high_surrogate_last_input = 0;
		} else {
			/* BMP character; also drop any orphaned high surrogate */
			window->utf16_high_surrogate_last_input = 0;
			codepoint = utf16_char;
		}
		if (codepoint) {
			if (codepoint == '\r') {
				codepoint = '\n'; /* Just treat all \r as newline */
			}
			/* Emit only printable characters plus tab/newline (32+ excludes
			 * control chars; 127 is DEL) */
			if((codepoint >= 32 && codepoint != 127) || codepoint == '\t' || codepoint == '\n') {
				win32_window_process_event(
					window,
					(struct sys_event) {
						.kind = SYS_EVENT_KIND_TEXT,
						.text_codepoint = codepoint
					}
				);
			}
		}
	} break;
	/* Mouse buttons */
	case WM_LBUTTONUP:
	case WM_MBUTTONUP:
	case WM_RBUTTONUP:
	case WM_XBUTTONUP: {
		/* Release mouse capture taken on the matching button-down */
		ReleaseCapture();
		is_release = 1;
	} FALLTHROUGH;
	case WM_LBUTTONDOWN:
	case WM_MBUTTONDOWN:
	case WM_RBUTTONDOWN:
	case WM_XBUTTONDOWN: {
		/* Capture so drags keep delivering messages outside the client area */
		if(!is_release) {
			SetCapture(hwnd);
		}
		enum sys_event_kind event_kind = is_release ? SYS_EVENT_KIND_BUTTON_UP : SYS_EVENT_KIND_BUTTON_DOWN;
		enum sys_btn button = 0;
		switch(msg) {
		case WM_LBUTTONUP: case WM_LBUTTONDOWN: button = SYS_BTN_M1; break;
		case WM_RBUTTONUP: case WM_RBUTTONDOWN: button = SYS_BTN_M2; break;
		case WM_MBUTTONUP: case WM_MBUTTONDOWN: button = SYS_BTN_M3; break;
		case WM_XBUTTONUP: case WM_XBUTTONDOWN: {
			u32 wparam_xbutton = GET_XBUTTON_WPARAM(wparam);
			if (wparam_xbutton == XBUTTON1) {
				button = SYS_BTN_M4;
			} else if (wparam_xbutton == XBUTTON2) {
				button = SYS_BTN_M5;
			}
		} break;
		}
		if (button) {
			win32_window_process_event(
				window,
				(struct sys_event) {
					.kind = event_kind,
					.button = button
				}
			);
		}
	} break;
	/* Mouse wheel: emit one down+up pair per WHEEL_DELTA notch */
	case WM_MOUSEWHEEL: {
		int delta = GET_WHEEL_DELTA_WPARAM(wparam);
		i32 dir = delta >= 0 ? 1 : -1;
		enum sys_btn button = dir >= 0 ? SYS_BTN_MWHEELUP : SYS_BTN_MWHEELDOWN;
		for (i32 i = 0; i < (dir * delta); i += WHEEL_DELTA) {
			/* Send a button down & button up event simultaneously */
			win32_window_process_event(window, (struct sys_event) { .kind = SYS_EVENT_KIND_BUTTON_DOWN, .button = button });
			win32_window_process_event(window, (struct sys_event) { .kind = SYS_EVENT_KIND_BUTTON_UP, .button = button });
		}
	} break;
	/* Mouse move (absolute, client-space coordinates) */
	case WM_MOUSEMOVE: {
		i32 x = GET_X_LPARAM(lparam);
		i32 y = GET_Y_LPARAM(lparam);
		win32_window_process_event(
			window,
			(struct sys_event) {
				.kind = SYS_EVENT_KIND_CURSOR_MOVE,
				.cursor_position = V2(x, y)
			}
		);
	} break;
	/* Raw mouse move (relative deltas, registered via RegisterRawInputDevices) */
	case WM_INPUT: {
		struct arena_temp scratch = scratch_begin_no_conflict();
		/* Read raw input buffer (first call queries the required size) */
		UINT buff_size;
		GetRawInputData((HRAWINPUT)lparam, RID_INPUT, 0, &buff_size, sizeof(RAWINPUTHEADER));
		u8 *buff = arena_push_array(scratch.arena, u8, buff_size);
		if (GetRawInputData((HRAWINPUT)lparam, RID_INPUT, buff, &buff_size, sizeof(RAWINPUTHEADER)) != buff_size) {
			logf_error("GetRawInputData did not return correct size");
			break;
		}
		/* NOTE(review): copies sizeof(RAWINPUT) regardless of buff_size —
		 * assumes the packet is at least that large; confirm for mouse input */
		RAWINPUT raw = ZI;
		MEMCPY(&raw, buff, sizeof(RAWINPUT));
		if (raw.header.dwType == RIM_TYPEMOUSE) {
			i32 x = raw.data.mouse.lLastX;
			i32 y = raw.data.mouse.lLastY;
			struct v2 delta = V2(x, y);
			win32_window_process_event(
				window,
				(struct sys_event) {
					.kind = SYS_EVENT_KIND_MOUSE_MOVE,
					.mouse_delta = delta
				}
			);
		}
		scratch_end(scratch);
	} break;
	/* Minmax info */
	case WM_GETMINMAXINFO: {
		/* Set minimum window size */
		LPMINMAXINFO mmi = (LPMINMAXINFO)lparam;
		mmi->ptMinTrackSize.x = 100;
		mmi->ptMinTrackSize.y = 100;
	} break;
	default: {
		result = DefWindowProcW(hwnd, msg, wparam, lparam);
	} break;
	}
	return result;
}
struct sys_window *sys_window_alloc(void)
{
	__prof;
	/* The public sys_window handle is simply the platform window struct. */
	struct win32_window *window = win32_window_alloc();
	return (struct sys_window *)window;
}
void sys_window_release(struct sys_window *sys_window)
{
	__prof;
	/* Hand the backing platform window back to the win32 layer. */
	win32_window_release((struct win32_window *)sys_window);
}
void sys_window_update_settings(struct sys_window *sys_window, struct sys_window_settings *settings)
{
	__prof;
	struct win32_window *w = (struct win32_window *)sys_window;
	/* Apply under the settings mutex so readers never observe a
	 * partially-applied update. */
	struct snc_lock lock = snc_lock_e(&w->settings_mutex);
	win32_update_window_from_settings(w, settings);
	snc_unlock(&lock);
}
/* FIXME: Lock settings mutex for these functions */
struct sys_window_settings sys_window_get_settings(struct sys_window *sys_window)
{
struct win32_window *window = (struct win32_window *)sys_window;
return window->settings;
}
void sys_window_show(struct sys_window *sys_window)
{
	struct win32_window *w = (struct win32_window *)sys_window;
	struct snc_lock lock = snc_lock_e(&w->settings_mutex);
	{
		/* Map the stored settings flags onto the initial show command
		 * (maximized wins over minimized, matching the flag priority). */
		i32 show_cmd;
		if (w->settings.flags & SYS_WINDOW_SETTINGS_FLAG_MAXIMIZED) {
			show_cmd = SW_SHOWMAXIMIZED;
		} else if (w->settings.flags & SYS_WINDOW_SETTINGS_FLAG_MINIMIZED) {
			show_cmd = SW_MINIMIZE;
		} else {
			show_cmd = SW_NORMAL;
		}
		w->flags |= SYS_WINDOW_FLAG_SHOWING;
		ShowWindow(w->hwnd, show_cmd);
		BringWindowToTop(w->hwnd);
	}
	snc_unlock(&lock);
}
struct v2 sys_window_get_size(struct sys_window *sys_window)
{
struct win32_window *window = (struct win32_window *)sys_window;
return V2((f32)window->width, (f32)window->height);
}
struct v2 sys_window_get_monitor_size(struct sys_window *sys_window)
{
struct win32_window *window = (struct win32_window *)sys_window;
return V2((f32)window->monitor_width, (f32)window->monitor_height);
}
u64 sys_window_get_internal_handle(struct sys_window *sys_window)
{
struct win32_window *window = (struct win32_window *)sys_window;
return (u64)window->hwnd;
}
void sys_window_cursor_set_pos(struct sys_window *sys_window, struct v2 pos)
{
	/* Record the request and wake the window thread, which applies all
	 * deferred cursor flags in win32_window_proc. */
	struct win32_window *w = (struct win32_window *)sys_window;
	w->cursor_set_position = pos;
	w->cursor_set_flags |= WIN32_WINDOW_CURSOR_SET_FLAG_POSITION;
	win32_window_wake(w);
}
void sys_window_cursor_show(struct sys_window *sys_window)
{
	/* Queue a cursor-show request for the window thread to apply. */
	struct win32_window *w = (struct win32_window *)sys_window;
	w->cursor_set_flags |= WIN32_WINDOW_CURSOR_SET_FLAG_SHOW;
	win32_window_wake(w);
}
void sys_window_cursor_hide(struct sys_window *sys_window)
{
	/* Queue a cursor-hide request for the window thread to apply. */
	struct win32_window *w = (struct win32_window *)sys_window;
	w->cursor_set_flags |= WIN32_WINDOW_CURSOR_SET_FLAG_HIDE;
	win32_window_wake(w);
}
void sys_window_cursor_enable_clip(struct sys_window *sys_window, struct rect bounds)
{
	/* Queue a clip request; bounds are window-space and get clamped to the
	 * window rect when applied on the window thread. */
	struct win32_window *w = (struct win32_window *)sys_window;
	w->cursor_clip_bounds = bounds;
	w->cursor_set_flags |= WIN32_WINDOW_CURSOR_SET_FLAG_ENABLE_CLIP;
	win32_window_wake(w);
}
void sys_window_cursor_disable_clip(struct sys_window *sys_window)
{
	/* Queue a request to stop clipping the cursor. */
	struct win32_window *w = (struct win32_window *)sys_window;
	w->cursor_set_flags |= WIN32_WINDOW_CURSOR_SET_FLAG_DISABLE_CLIP;
	win32_window_wake(w);
}
/* ========================== *
* Threads
* ========================== */
INTERNAL struct win32_thread *win32_thread_alloc(void)
{
	/* Pop a recycled thread struct from the free list (or push a fresh one
	 * onto the arena), zero it, and append it to the live thread list. */
	struct win32_thread *thread = 0;
	struct snc_lock lock = snc_lock_e(&G.threads_mutex);
	{
		thread = G.threads_first_free;
		if (thread) {
			G.threads_first_free = thread->next;
		} else {
			thread = arena_push_no_zero(G.threads_arena, struct win32_thread);
		}
		MEMZERO_STRUCT(thread);
		/* Append to the tail of the doubly-linked live list */
		thread->prev = G.threads_last;
		if (G.threads_last) {
			G.threads_last->next = thread;
		} else {
			G.threads_first = thread;
		}
		G.threads_last = thread;
	}
	snc_unlock(&lock);
	return thread;
}
/* Unlink t from the live thread list and return it to the free list for
 * reuse by win32_thread_alloc. */
INTERNAL void win32_thread_release(struct win32_thread *t)
{
	struct snc_lock lock = snc_lock_e(&G.threads_mutex);
	{
		if (t->prev) {
			t->prev->next = t->next;
		}
		if (t->next) {
			t->next->prev = t->prev;
		}
		if (G.threads_first == t) {
			G.threads_first = t->next;
		}
		if (G.threads_last == t) {
			G.threads_last = t->prev;
		}
		/* Push onto the free list. Previously the head pointer was never
		 * updated here, so released structs leaked instead of being reused. */
		t->next = G.threads_first_free;
		G.threads_first_free = t;
	}
	snc_unlock(&lock);
}
/* OS-level entry point for every thread created via sys_thread_alloc.
 * Sets up per-thread services (fiber state, profiler registration, COM,
 * thread name) before jumping into the user-supplied entry point. */
INTERNAL DWORD WINAPI win32_thread_proc(LPVOID vt)
{
	fiber_alloc(0); /* Register this thread with the fiber system */
	struct win32_thread *t = (struct win32_thread *)vt;
	__profthread(t->thread_name_cstr, t->profiler_group);
	/* Initialize COM */
	CoInitializeEx(0, COINIT_MULTITHREADED);
	/* Set thread name (visible in debuggers and profilers) */
	if (t->thread_name_wstr[0] != 0) {
		SetThreadDescription(GetCurrentThread(), t->thread_name_wstr);
	}
	logf_info("New thread \"%F\" created with ID %F", FMT_STR(string_from_cstr_no_limit(t->thread_name_cstr)), FMT_UINT(sys_thread_id()));
	/* Enter thread entry point */
	t->entry_point(t->thread_data);
	/* Uninitialize COM */
	CoUninitialize();
	return 0;
}
/* Creates an OS thread that starts in win32_thread_proc and then calls
 * entry_point(thread_data). The name is kept in both narrow (profiler/log)
 * and wide (SetThreadDescription) form, truncated to the struct buffers. */
struct sys_thread *sys_thread_alloc(sys_thread_func *entry_point, void *thread_data, struct string thread_name, i32 profiler_group)
{
	__prof;
	ASSERT(entry_point != 0);
	struct arena_temp scratch = scratch_begin_no_conflict();
	logf_info("Creating thread \"%F\"", FMT_STR(thread_name));
	/* Allocate and fill in the thread record */
	struct win32_thread *t = win32_thread_alloc();
	t->entry_point = entry_point;
	t->thread_data = thread_data;
	t->profiler_group = profiler_group;
	/* Narrow copy of the name (truncated, NUL-terminated) */
	u64 name8_len = min_u64(countof(t->thread_name_cstr) - 1, thread_name.len);
	MEMCPY(t->thread_name_cstr, thread_name.text, name8_len * sizeof(*t->thread_name_cstr));
	t->thread_name_cstr[name8_len] = 0;
	/* Wide copy of the name (truncated, NUL-terminated) */
	struct string16 name16 = string16_from_string(scratch.arena, thread_name);
	u64 name16_len = min_u64(countof(t->thread_name_wstr) - 1, name16.len);
	MEMCPY(t->thread_name_wstr, name16.text, name16_len * sizeof(*t->thread_name_wstr));
	t->thread_name_wstr[name16_len] = 0;
	/* Spin up the OS thread */
	t->handle = CreateThread(0, THREAD_STACK_SIZE, win32_thread_proc, t, 0, 0);
	if (!t->handle) {
		sys_panic(LIT("Failed to create thread"));
	}
	scratch_end(scratch);
	return (struct sys_thread *)t;
}
void sys_thread_wait_release(struct sys_thread *thread)
{
__prof;
b32 success = sys_thread_try_release(thread, F32_INFINITY);
ASSERT(success);
(UNUSED)success;
}
/* Waits up to timeout_seconds for the thread to exit; on success closes its
 * handle and recycles the record. Returns 0 if the thread is still running. */
b32 sys_thread_try_release(struct sys_thread *thread, f32 timeout_seconds)
{
	__prof;
	struct win32_thread *t = (struct win32_thread *)thread;
	b32 released = 0;
	HANDLE handle = t->handle;
	if (handle) {
		/* F32_INFINITY maps to INFINITE; otherwise round seconds to ms */
		DWORD timeout_ms;
		if (timeout_seconds == F32_INFINITY) {
			timeout_ms = INFINITE;
		} else {
			timeout_ms = math_round_to_int(timeout_seconds * 1000);
		}
		if (WaitForSingleObject(handle, timeout_ms) == WAIT_OBJECT_0) {
			/* Thread exited in time: reclaim both handle and record */
			CloseHandle(handle);
			win32_thread_release(t);
			released = 1;
		}
	}
	return released;
}
/* Waits for the thread, terminates it, and reclaims its record.
 * NOTE(review): the INFINITE wait means TerminateThread only runs after the
 * thread has already exited (where it is effectively a no-op) — confirm
 * whether a bounded wait was intended for the "force" semantics. */
void sys_thread_force_release(struct sys_thread *thread)
{
	__prof;
	struct win32_thread *t = (struct win32_thread *)thread;
	HANDLE handle = t->handle;
	/* Wait for thread to stop */
	if (handle) {
		DWORD res = WaitForSingleObject(handle, INFINITE);
		TerminateThread(handle, 0);
		CloseHandle(handle);
		ASSERT(res != WAIT_FAILED);
		(UNUSED)res;
	}
	/* Release thread struct */
	win32_thread_release(t);
}
u32 sys_thread_id(void)
{
return GetCurrentThreadId();
}
#if RTC
/* Debug-only check that the caller is running on the expected thread. */
void sys_thread_assert(u32 tid)
{
	ASSERT(tid == sys_thread_id());
}
#endif
/* ========================== *
* Message box
* ========================== */
/* Shows a blocking native message box; the text is also logged so headless
 * runs keep a record of what was shown. */
void sys_message_box(enum sys_message_box_kind kind, struct string message)
{
	struct arena_temp scratch = scratch_begin_no_conflict();
	wchar_t *text_wstr = wstr_from_string(scratch.arena, message);
	const wchar_t *title_wstr = L"";
	UINT type = MB_SETFOREGROUND;
	/* Pick icon + title per severity */
	switch (kind) {
	case SYS_MESSAGE_BOX_KIND_OK:
		type |= MB_ICONINFORMATION;
		break;
	case SYS_MESSAGE_BOX_KIND_WARNING:
		title_wstr = L"Warning";
		type |= MB_ICONWARNING;
		break;
	case SYS_MESSAGE_BOX_KIND_ERROR:
		title_wstr = L"Error";
		type |= MB_ICONERROR;
		break;
	case SYS_MESSAGE_BOX_KIND_FATAL:
		title_wstr = L"Fatal error";
		type |= MB_ICONSTOP;
		break;
	}
	logf_info("Showing message box kind %F with text \"%F\"", FMT_SINT(kind), FMT_STR(message));
	MessageBoxExW(0, text_wstr, title_wstr, type, 0);
	scratch_end(scratch);
}
/* ========================== *
* Clipboard
* ========================== */
/* Places str on the system clipboard as CF_UNICODETEXT (UTF-16 + wide NUL).
 * Silently does nothing if the clipboard cannot be opened. */
void sys_set_clipboard_text(struct string str)
{
	if (OpenClipboard(0)) {
		struct arena_temp scratch = scratch_begin_no_conflict();
		struct string16 str16 = string16_from_string(scratch.arena, str);
		u64 str16_size_bytes = str16.len * 2;
		EmptyClipboard();
		/* +2 for the terminator: dest_wstr[str16.len] = 0 writes a full
		 * 16-bit unit (the previous +1 overflowed the allocation by 1 byte) */
		HANDLE handle = GlobalAlloc(GMEM_MOVEABLE, str16_size_bytes + 2);
		if (handle) {
			u16 *dest_wstr = (u16 *)GlobalLock(handle);
			if (dest_wstr) {
				MEMCPY(dest_wstr, str16.text, str16_size_bytes);
				dest_wstr[str16.len] = 0;
				GlobalUnlock(handle);
				/* Ownership transfers to the system only on success */
				if (!SetClipboardData(CF_UNICODETEXT, handle)) {
					GlobalFree(handle);
				}
			} else {
				GlobalFree(handle);
			}
		}
		CloseClipboard();
		scratch_end(scratch);
	}
}
/* Reads CF_UNICODETEXT from the clipboard into arena as a UTF-8 string.
 * Returns an empty string when no text is available or access fails. */
struct string sys_get_clipboard_text(struct arena *arena)
{
	struct string res = ZI;
	if (IsClipboardFormatAvailable(CF_UNICODETEXT) && OpenClipboard(0)) {
		HANDLE handle = GetClipboardData(CF_UNICODETEXT);
		if (handle) {
			u16 *src_wstr = (u16 *)GlobalLock(handle);
			/* GlobalLock can fail (returns NULL); previously this was dereferenced unchecked */
			if (src_wstr) {
				res = string_from_string16(arena, string16_from_wstr_no_limit(src_wstr));
				GlobalUnlock(handle);
			}
		}
		CloseClipboard();
	}
	return res;
}
/* ========================== *
* Util
* ========================== */
void sys_true_rand(struct string b)
{
	/* Fill b with cryptographically strong random bytes from the system RNG. */
	BCryptGenRandom(BCRYPT_RNG_ALG_HANDLE, (PUCHAR)b.text, (ULONG)b.len, 0);
}
u32 sys_num_logical_processors(void)
{
	/* Count logical processors across every processor group, not just the
	 * group the calling thread happens to run in. */
	u32 count = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
	return count;
}
/* ========================== *
* Entry point
* ========================== */
INTERNAL SYS_JOB_DEF(sys_app_startup_job, _)
{
(UNUSED)_;
struct arena_temp scratch = scratch_begin_no_conflict();
{
struct string cmdline_args = string_from_wstr(scratch.arena, G.cmdline_args_wstr, countof(G.cmdline_args_wstr));
sys_app_startup(cmdline_args);
SetEvent(G.startup_end_event);
}
scratch_end(scratch);
}
INTERNAL SYS_JOB_DEF(sys_app_shutdown_job, _)
{
__prof;
(UNUSED)_;
i32 num_funcs = atomic32_fetch(&G.num_exit_funcs);
for (i32 i = num_funcs - 1; i >= 0; --i) {
sys_exit_func *func = G.exit_funcs[i];
func();
}
SetEvent(G.exit_end_event);
}
/* Process entry point. Boots the platform layer (events, timer, fibers,
 * job pools, window class, raw input), kicks off app startup on a job, waits
 * for the app's exit sequence, then tears everything down. Returns 1 when
 * exiting due to a panic, 0 otherwise.
 * Fixes vs previous version: the toolhelp snapshot handle is only closed
 * when it was actually opened, and GetThreadDescription's buffer is freed. */
int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance, _In_ LPWSTR cmdline_wstr, _In_ int show_code)
{
	(UNUSED)instance;
	(UNUSED)prev_instance;
	(UNUSED)cmdline_wstr;
	(UNUSED)show_code;
	__profthread("Main thread", PROF_THREAD_GROUP_MAIN);
#if PROFILING
	/* Start profiler */
	{
		__profn("Launch profiler");
		STARTUPINFO si = ZI;
		si.cb = sizeof(si);
		PROCESS_INFORMATION pi = ZI;
		/* CreateProcessW may modify the command-line buffer, so pass a copy */
		wchar_t cmd[sizeof(PROFILING_CMD_WSTR)] = ZI;
		MEMCPY(cmd, PROFILING_CMD_WSTR, sizeof(PROFILING_CMD_WSTR));
		DeleteFileW(PROFILING_FILE_WSTR);
		b32 success = CreateProcessW(0, cmd, 0, 0, 0, DETACHED_PROCESS, 0, 0, &si, &pi);
		if (!success) {
			MessageBoxExW(0, L"Failed to launch profiler using command '" PROFILING_CMD_WSTR L"'.", L"Error", MB_ICONSTOP | MB_SETFOREGROUND | MB_TOPMOST, 0);
		}
	}
	/* Set internal profiler thread affinities */
	{
		__profn("Set profiler thread affinities");
		wchar_t *prefix_name_wstr = PROFILER_THREAD_PREFIX_WSTR;
		u64 prefix_name_wstr_len = ((i32)sizeof(PROFILER_THREAD_PREFIX_WSTR) >> 1) - 1;
		if (prefix_name_wstr_len > 0 && PROFILER_THREAD_AFFINITY_MASK != 0) {
			DWORD proc_id = GetCurrentProcessId();
			HANDLE snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
			if (snapshot != INVALID_HANDLE_VALUE) {
				THREADENTRY32 te = ZI;
				te.dwSize = sizeof(THREADENTRY32);
				if (Thread32First(snapshot, &te)) {
					do {
						if (te.th32OwnerProcessID == proc_id) {
							i32 thread_id = te.th32ThreadID;
							HANDLE thread = OpenThread(THREAD_ALL_ACCESS, FALSE, thread_id);
							if (thread) {
								wchar_t *thread_name_wstr = 0;
								HRESULT hr = GetThreadDescription(thread, &thread_name_wstr);
								if (SUCCEEDED(hr)) {
									/* Pin threads whose name starts with the profiler prefix */
									u64 thread_name_len = wstr_len_no_limit(thread_name_wstr);
									if (thread_name_len >= prefix_name_wstr_len && MEMEQ(thread_name_wstr, prefix_name_wstr, prefix_name_wstr_len)) {
										b32 success = SetThreadAffinityMask(thread, PROFILER_THREAD_AFFINITY_MASK) != 0;
										ASSERT(success);
										(UNUSED)success;
									}
									/* Per MS docs, the description buffer must be LocalFree'd */
									LocalFree(thread_name_wstr);
								}
								CloseHandle(thread);
							}
						}
					} while (Thread32Next(snapshot, &te));
				}
				/* Only close a snapshot that was actually opened; previously
				 * CloseHandle was also called on INVALID_HANDLE_VALUE */
				CloseHandle(snapshot);
			}
		}
	}
#endif
	/* ========================== *
	 * Sys startup
	 * ========================== */
	/* Set up exit events (manual-reset, initially unsignaled) */
	G.panic_event = CreateEventW(0, 1, 0, 0);
	G.startup_end_event = CreateEventW(0, 1, 0, 0);
	G.exit_begin_event = CreateEventW(0, 1, 0, 0);
	G.exit_end_event = CreateEventW(0, 1, 0, 0);
	/* Init timer */
	{
		LARGE_INTEGER qpf;
		QueryPerformanceFrequency(&qpf);
		/* NOTE(review): integer division loses precision when the QPC
		 * frequency does not divide 1e9 evenly — confirm this is acceptable */
		G.ns_per_qpc = 1000000000 / qpf.QuadPart;
	}
	{
		LARGE_INTEGER qpc;
		QueryPerformanceCounter(&qpc);
		G.timer_start_qpc = qpc.QuadPart;
	}
	/* Init fibers */
	G.num_fibers = 1; /* Fiber at index 0 always nil */
	G.fiber_names_arena = arena_alloc(GIBI(64));
	/* Init wait lists */
	G.wait_lists_arena = arena_alloc(GIBI(64));
	/* Convert main thread to fiber */
	fiber_alloc(0);
	/* Init job pools */
	for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(G.job_pools); ++pool_kind) {
		struct job_pool *pool = &G.job_pools[pool_kind];
		/* Init queues */
		for (enum sys_priority priority = 0; priority < (i32)countof(pool->job_queues); ++priority) {
			struct job_queue *queue = &pool->job_queues[priority];
			queue->arena = arena_alloc(GIBI(64));
		}
	}
	/* Stash a truncated copy of the command line for the startup job */
	u64 cmdline_len = wstr_len(cmdline_wstr, countof(G.cmdline_args_wstr) - 1);
	MEMCPY(G.cmdline_args_wstr, cmdline_wstr, cmdline_len * sizeof(*cmdline_wstr));
	G.cmdline_args_wstr[cmdline_len] = 0;
	G.main_thread_id = GetCurrentThreadId();
	SetThreadDescription(GetCurrentThread(), L"Main thread");
	/* Query system info */
	GetSystemInfo(&G.info);
	/* Set up threads */
	G.threads_arena = arena_alloc(GIBI(64));
	/* Set up watches */
	G.watches_arena = arena_alloc(GIBI(64));
	/* Set up windows */
	G.windows_arena = arena_alloc(GIBI(64));
	/* Initialize vk table */
	win32_init_vk_btn_table();
	/* Create window class */
	{
		/* Register the window class */
		WNDCLASSEXW *wc = &G.window_class;
		wc->cbSize = sizeof(WNDCLASSEX);
		wc->lpszClassName = WINDOW_CLASS_NAME;
		wc->hCursor = LoadCursor(0, IDC_ARROW);
		wc->style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
		//wc->hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);
		wc->lpfnWndProc = win32_window_proc;
		wc->hInstance = instance;
		/* Use first icon resource as window icon (same as explorer) */
		wchar_t path[4096] = ZI;
		GetModuleFileNameW(instance, path, countof(path));
		ExtractIconExW(path, 0, &wc->hIcon, &wc->hIconSm, 1);
		if (!RegisterClassExW(wc)) {
			sys_panic(LIT("Failed to register window class"));
		}
	}
	/* Register raw input (relative mouse deltas delivered via WM_INPUT) */
	if (!atomic32_fetch(&G.panicking)) {
		RAWINPUTDEVICE rid = (RAWINPUTDEVICE) {
			.usUsagePage = 0x01, /* HID_USAGE_PAGE_GENERIC */
			.usUsage = 0x02, /* HID_USAGE_GENERIC_MOUSE */
			//.dwFlags = RIDEV_NOLEGACY /* Adds mouse and also ignores legacy mouse messages */
		};
		b32 success = RegisterRawInputDevices(&rid, 1, sizeof(rid));
		ASSERT(success);
		(UNUSED)success;
	}
	/* Start test thread */
	struct sys_thread *test_thread = 0;
	if (!atomic32_fetch(&G.panicking)) {
		test_thread = sys_thread_alloc(test_entry, 0, LIT("Test thread"), PROF_THREAD_GROUP_APP);
	}
	/* ========================== *
	 * App startup
	 * ========================== */
	/* Run app start job */
	if (!atomic32_fetch(&G.panicking)) {
		sys_run(1, sys_app_startup_job, 0, SYS_POOL_FLOATING, SYS_PRIORITY_HIGH, 0);
	}
	/* Wait for startup end or panic */
	if (!atomic32_fetch(&G.panicking)) {
		HANDLE handles[] = {
			G.startup_end_event,
			G.panic_event
		};
		WaitForMultipleObjects(countof(handles), handles, 0, INFINITE);
	}
	/* Wait for exit start or panic */
	if (!atomic32_fetch(&G.panicking)) {
		HANDLE handles[] = {
			G.exit_begin_event,
			G.panic_event
		};
		WaitForMultipleObjects(countof(handles), handles, 0, INFINITE);
	}
	/* ========================== *
	 * App shutdown
	 * ========================== */
	/* Run exit callbacks job */
	if (!atomic32_fetch(&G.panicking)) {
		sys_run(1, sys_app_shutdown_job, 0, SYS_POOL_FLOATING, SYS_PRIORITY_HIGH, 0);
	}
	/* ========================== *
	 * Sys shutdown
	 * ========================== */
	/* Wait for exit end or panic */
	if (!atomic32_fetch(&G.panicking)) {
		HANDLE handles[] = {
			G.exit_end_event,
			G.panic_event
		};
		WaitForMultipleObjects(countof(handles), handles, 0, INFINITE);
	}
	/* Signal sys shutdown */
	if (!atomic32_fetch(&G.panicking)) {
		atomic32_fetch_set(&G.shutdown, 1);
	}
	/* Shutdown test thread (wake all workers so they can observe shutdown) */
	if (!atomic32_fetch(&G.panicking)) {
		for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(G.job_pools); ++pool_kind) {
			struct job_pool *pool = &G.job_pools[pool_kind];
			struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
			{
				atomic32_fetch_set(&pool->workers_shutdown.v, 1);
				snc_cv_signal(&pool->workers_wake_cv, I32_MAX);
			}
			snc_unlock(&lock);
		}
		sys_thread_wait_release(test_thread);
	}
	/* Find any dangling threads that haven't exited gracefully by now */
	if (!atomic32_fetch(&G.panicking)) {
		struct snc_lock lock = snc_lock_s(&G.threads_mutex);
		if (G.threads_first) {
			struct arena_temp scratch = scratch_begin_no_conflict();
			u64 num_dangling_threads = 0;
			struct string threads_msg = ZI;
			/* Appended format results are contiguous on the scratch arena */
			threads_msg.text = arena_push_dry(scratch.arena, u8);
			for (struct win32_thread *t = G.threads_first; t; t = t->next) {
				struct string name = string_from_cstr(t->thread_name_cstr, countof(t->thread_name_cstr));
				threads_msg.len += string_format(scratch.arena, LIT("  \"%F\"\n"), FMT_STR(name)).len;
				++num_dangling_threads;
			}
			threads_msg = string_format(scratch.arena, LIT("%F dangling thread(s):\n%F"), FMT_UINT(num_dangling_threads), FMT_STR(threads_msg));
			sys_panic(threads_msg);
			scratch_end(scratch);
		}
		snc_unlock(&lock);
	}
	/* Exit */
	i32 exit_code = 0;
	if (atomic32_fetch(&G.panicking)) {
		WaitForSingleObject(G.panic_event, INFINITE);
		MessageBoxExW(0, G.panic_wstr, L"Fatal error", MB_ICONSTOP | MB_SETFOREGROUND | MB_TOPMOST, 0);
		exit_code = 1;
	}
	return exit_code;
}
/* ========================== *
* CRT Stub
* ========================== */
#if !CRTLIB
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-variable-declarations"
#pragma clang diagnostic ignored "-Wmissing-prototypes"
/* Enable floating point */
/* The MSVC toolchain requires _fltused to be defined when floating point is
 * used without the CRT; its value is irrelevant, only the symbol matters. */
__attribute((used))
int _fltused;
/* Entry point when linking without the C runtime: forward to wWinMain and
 * exit with its result (ExitProcess does not return). */
__attribute((used))
void __stdcall wWinMainCRTStartup(void)
{
	int result = wWinMain(GetModuleHandle(0), 0, GetCommandLineW(), 0);
	ExitProcess(result);
}
#pragma clang diagnostic pop
#endif /* !CRTLIB */