Add toggleable topmost-window key

This commit is contained in:
jacob 2025-07-25 17:47:38 -05:00
parent a8651f7aa7
commit 1fc534004e
5 changed files with 111 additions and 133 deletions

View File

@ -2319,8 +2319,6 @@ INTERNAL SYS_JOB_DEF(dx12_upload_job, job)
D3D12_RESOURCE_DESC desc = ZI;
ID3D12Resource_GetDesc(r->resource, &desc);
#if 1
{
u64 upload_size = 0;
u64 upload_row_size = 0;
@ -2413,89 +2411,7 @@ INTERNAL SYS_JOB_DEF(dx12_upload_job, job)
/* Release upload heap now */
dx12_resource_release_now(upload);
}
#else
/* Create temp upload heap */
struct dx12_resource *upload = 0;
{
enum dx12_resource_view_flags upload_view_flags = DX12_RESOURCE_VIEW_FLAG_NONE;
D3D12_HEAP_PROPERTIES upload_heap_props = { .Type = D3D12_HEAP_TYPE_UPLOAD };
upload_heap_props.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
upload_heap_props.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
D3D12_HEAP_FLAGS upload_heap_flags = D3D12_HEAP_FLAG_CREATE_NOT_ZEROED;
D3D12_RESOURCE_DESC upload_desc = ZI;
upload_desc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
upload_desc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
upload_desc.Format = DXGI_FORMAT_UNKNOWN;
upload_desc.Alignment = 0;
upload_desc.Width = upload_size;
upload_desc.Height = 1;
upload_desc.DepthOrArraySize = 1;
upload_desc.MipLevels = 1;
upload_desc.SampleDesc.Count = 1;
upload_desc.SampleDesc.Quality = 0;
D3D12_RESOURCE_STATES upload_initial_state = D3D12_RESOURCE_STATE_GENERIC_READ;
upload = dx12_resource_alloc(upload_heap_props, upload_heap_flags, upload_desc, upload_initial_state, upload_view_flags);
/* Copy to upload heap */
{
D3D12_RANGE read_range = ZI;
void *mapped = 0;
HRESULT hr = ID3D12Resource_Map(upload->resource, 0, &read_range, &mapped);
if (FAILED(hr) || !mapped) {
/* TODO: Don't panic */
sys_panic(LIT("Failed to map texture upload resource"));
}
u8 *dst = (u8 *)mapped + footprint.Offset;
u8 *src = data;
for (u32 y = 0; y < upload_num_rows; ++y) {
MEMCPY(dst + y * footprint.Footprint.RowPitch, src + y * upload_row_size, upload_row_size);
}
ID3D12Resource_Unmap(upload->resource, 0, 0);
}
}
/* Copy from upload heap to texture */
struct command_queue *cq = G.command_queues[DX12_QUEUE_COPY_BACKGROUND];
struct command_list *cl = command_list_open(cq->cl_pool);
{
__profnc_dx12(cl->cq->prof, cl->cl, "Upload texture", RGB32_F(0.2, 0.5, 0.2));
D3D12_TEXTURE_COPY_LOCATION dst_loc = {
.pResource = r->resource,
.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX,
.SubresourceIndex = 0,
};
D3D12_TEXTURE_COPY_LOCATION src_loc = {
.pResource = upload->resource,
.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT,
.PlacedFootprint = footprint,
};
ID3D12GraphicsCommandList_CopyTextureRegion(cl->cl, &dst_loc, 0, 0, 0, &src_loc, 0);
}
u64 fence_target = command_list_close(cl);
/* Wait on fence so we know it's safe to release upload heap */
if (ID3D12Fence_GetCompletedValue(cq->submit_fence) < fence_target) {
struct dx12_wait_fence_job_sig wait_sig = ZI;
wait_sig.fence = cq->submit_fence;
wait_sig.target = fence_target;
struct snc_counter counter = ZI;
sys_run(1, dx12_wait_fence_job, &wait_sig, SYS_POOL_FLOATING, SYS_PRIORITY_LOW, &counter);
snc_counter_wait(&counter);
}
/* Release upload heap now */
dx12_resource_release_now(upload);
#endif
}
/* ========================== *

View File

@ -440,6 +440,8 @@ void sys_window_cursor_enable_clip(struct sys_window *sys_window, struct rect bo
void sys_window_cursor_disable_clip(struct sys_window *sys_window);
void sys_window_toggle_topmost(struct sys_window *sys_window);
/* ========================== *
* Address
* ========================== */

View File

@ -61,7 +61,7 @@ typedef THREAD_DEF(thread_func, data);
#define MAX_EXIT_FUNCS 1024
/* Arbitrary threshold for determining when to fall back from a looped WakeByAddressSingle to WakeByAddressAll */
#define WAKE_ALL_THRESHOLD 24
#define WAKE_ALL_THRESHOLD 16
struct ticket_mutex {
struct atomic64_padded ticket;
@ -113,6 +113,9 @@ struct win32_window {
struct v2 cursor_set_position;
struct rect cursor_clip_bounds;
struct atomic32 topmost_toggles;
b32 is_topmost;
struct snc_mutex event_arena_swp_mutex;
i32 current_event_arena_index;
struct arena *event_arenas[2];
@ -262,8 +265,7 @@ struct alignas(64) job_pool {
/* Workers */
struct atomic32_padded workers_shutdown;
struct atomic64_padded num_jobs_in_queue;
struct snc_mutex workers_wake_mutex;
struct snc_cv workers_wake_cv;
struct ticket_mutex workers_wake_lock;
i32 num_worker_threads;
i32 thread_priority;
@ -671,11 +673,11 @@ INTERNAL void wake_fibers_locked(i32 num_fibers, struct fiber **fibers)
/* Resume jobs */
/* TODO: Batch submit waiters based on queue kind rather than one at a time */
i32 num_workers_to_wake[NUM_SYS_POOLS] = ZI;
i32 job_counts_per_pool[NUM_SYS_POOLS] = ZI;
for (i32 i = 0; i < num_fibers; ++i) {
struct fiber *fiber = fibers[i];
enum sys_pool pool_kind = fiber->job_pool;
++num_workers_to_wake[pool_kind];
++job_counts_per_pool[pool_kind];
struct job_pool *pool = &G.job_pools[pool_kind];
struct job_queue *queue = &pool->job_queues[fiber->job_priority];
tm_lock(&queue->lock);
@ -706,16 +708,22 @@ INTERNAL void wake_fibers_locked(i32 num_fibers, struct fiber **fibers)
/* Wake workers */
if (num_fibers > 0) {
for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(num_workers_to_wake); ++pool_kind) {
i32 wake_count = num_workers_to_wake[pool_kind];
if (wake_count > 0) {
for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(job_counts_per_pool); ++pool_kind) {
i32 job_count = job_counts_per_pool[pool_kind];
if (job_count > 0) {
struct job_pool *pool = &G.job_pools[pool_kind];
struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
tm_lock(&pool->workers_wake_lock);
{
atomic64_fetch_add(&pool->num_jobs_in_queue.v, wake_count);
snc_cv_signal(&pool->workers_wake_cv, wake_count);
atomic64_fetch_add(&pool->num_jobs_in_queue.v, job_count);
if (job_count >= WAKE_ALL_THRESHOLD) {
WakeByAddressAll(&pool->num_jobs_in_queue);
} else {
for (i32 i = 0; i < job_count; ++i) {
WakeByAddressSingle(&pool->num_jobs_in_queue);
}
}
}
snc_unlock(&lock);
tm_unlock(&pool->workers_wake_lock);
}
}
}
@ -857,6 +865,7 @@ INTERNAL struct fiber *fiber_alloc(struct job_pool *pool)
}
if (new_name_cstr != 0) {
__profn("Initialize fiber");
fiber->id = fiber_id;
/* Id to ASCII */
@ -893,11 +902,21 @@ INTERNAL struct fiber *fiber_alloc(struct job_pool *pool)
/* Init win32 fiber */
if (pool != 0) {
__profn("CreateFiber");
fiber->addr = CreateFiber(FIBER_STACK_SIZE, job_fiber_entry, (void *)(i64)fiber_id);
} else {
/* Fiber is not a part of a job pool, convert thread to fiber */
__profn("ConvertThreadToFiber");
fiber->addr = ConvertThreadToFiber((void *)(i64)fiber_id);
}
/* Init scratch context */
{
__profn("Initialize scratch context");
for (u32 i = 0; i < countof(fiber->scratch_ctx.arenas); ++i) {
fiber->scratch_ctx.arenas[i] = arena_alloc(GIBI(64));
}
}
}
fiber->wait_addr = 0;
fiber->wait_time = 0;
@ -1001,6 +1020,7 @@ INTERNAL void job_fiber_entry(void *id_ptr)
void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_pool pool_kind, enum sys_priority priority, struct snc_counter *counter)
{
__prof;
if (count > 0) {
if (counter) {
snc_counter_add(counter, count);
@ -1036,12 +1056,20 @@ void sys_run(i32 count, sys_job_func *func, void *sig, enum sys_pool pool_kind,
tm_unlock(&queue->lock);
/* Wake workers */
struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
{
atomic64_fetch_add(&pool->num_jobs_in_queue.v, count);
snc_cv_signal(&pool->workers_wake_cv, count);
tm_lock(&pool->workers_wake_lock);
{
atomic64_fetch_add(&pool->num_jobs_in_queue.v, count);
if (count >= WAKE_ALL_THRESHOLD) {
WakeByAddressAll(&pool->num_jobs_in_queue);
} else {
for (i32 i = 0; i < count; ++i) {
WakeByAddressSingle(&pool->num_jobs_in_queue);
}
}
}
tm_unlock(&pool->workers_wake_lock);
}
snc_unlock(&lock);
}
}
@ -1203,6 +1231,7 @@ INTERNAL THREAD_DEF(job_worker_entry, worker_ctx_arg)
case YIELD_KIND_WAIT:
{
__profn("Process fiber wait");
volatile void *wait_addr = yield.wait.addr;
void *wait_cmp = yield.wait.cmp;
u32 wait_size = yield.wait.size;
@ -1339,17 +1368,27 @@ INTERNAL THREAD_DEF(job_worker_entry, worker_ctx_arg)
}
}
/* Wait */
struct snc_lock wake_lock = snc_lock_s(&pool->workers_wake_mutex);
{
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
while (atomic64_fetch(&pool->num_jobs_in_queue.v) <= 0 && !shutdown) {
//__profnc("Wait for job", RGB32_F(0.75, 0.75, 0));
snc_cv_wait(&pool->workers_wake_cv, &wake_lock);
/* Wait for job */
i64 num_jobs_in_queue = atomic64_fetch(&pool->num_jobs_in_queue.v);
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
if (num_jobs_in_queue <= 0 && !shutdown) {
//__profnc("Wait for job", RGB32_F(0.75, 0.75, 0));
tm_lock(&pool->workers_wake_lock);
{
i64 num_jobs_in_queue = atomic64_fetch(&pool->num_jobs_in_queue.v);
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
while (num_jobs_in_queue <= 0 && !shutdown) {
{
tm_unlock(&pool->workers_wake_lock);
WaitOnAddress(&pool->num_jobs_in_queue, &num_jobs_in_queue, sizeof(num_jobs_in_queue), INFINITE);
tm_lock(&pool->workers_wake_lock);
}
shutdown = atomic32_fetch(&pool->workers_shutdown.v);
num_jobs_in_queue = atomic64_fetch(&pool->num_jobs_in_queue.v);
}
}
tm_unlock(&pool->workers_wake_lock);
}
snc_unlock(&wake_lock);
}
/* Worker shutdown */
@ -1434,14 +1473,7 @@ INTERNAL THREAD_DEF(job_scheduler_entry, _)
struct sys_scratch_ctx *sys_scratch_ctx_from_fiber_id(i16 id)
{
struct fiber *fiber = fiber_from_id(id);
struct sys_scratch_ctx *scratch_ctx = &fiber->scratch_ctx;
if (!scratch_ctx->arenas[0]) {
__profn("Initialize scratch context");
for (u32 i = 0; i < countof(scratch_ctx->arenas); ++i) {
scratch_ctx->arenas[i] = arena_alloc(GIBI(64));
}
}
return scratch_ctx;
return &fiber->scratch_ctx;
}
/* ========================== *
@ -1591,19 +1623,23 @@ void sys_mkdir(struct string path)
int err_code = SHCreateDirectory(0, path_wstr);
struct string err = ZI;
switch (err_code) {
case ERROR_BAD_PATHNAME: {
case ERROR_BAD_PATHNAME:
{
err = LIT("Bad path name");
} break;
case ERROR_FILENAME_EXCED_RANGE: {
case ERROR_FILENAME_EXCED_RANGE:
{
err = LIT("Path name too long");
} break;
case ERROR_FILE_EXISTS: {
case ERROR_FILE_EXISTS:
{
err = LIT("A file already exists at this location");
} break;
case ERROR_CANCELLED: {
case ERROR_CANCELLED:
{
err = LIT("User canceled the operation");
} break;
@ -1804,10 +1840,11 @@ struct sys_file_time sys_file_get_time(struct sys_file file)
FileTimeToSystemTime(&ft_accessed, &st_accessed);
FileTimeToSystemTime(&ft_modified, &st_modified);
return (struct sys_file_time) {
return (struct sys_file_time)
{
.created = win32_time_to_sys_time(st_created),
.accessed = win32_time_to_sys_time(st_accessed),
.modified = win32_time_to_sys_time(st_modified)
.accessed = win32_time_to_sys_time(st_accessed),
.modified = win32_time_to_sys_time(st_modified)
};
} else {
return (struct sys_file_time) { 0 };
@ -2369,13 +2406,6 @@ INTERNAL void win32_update_window_from_settings(struct win32_window *window, str
};
SetWindowPlacement(hwnd, &wp);
/* Make window always on top when debugging */
#if 0
#if RTC
SetWindowPos(hwnd, HWND_TOPMOST, 0, 0, 0, 0, SWP_NOMOVE | SWP_NOSIZE);
#endif
#endif
{
struct arena_temp scratch = scratch_begin_no_conflict();
wchar_t *title_wstr = wstr_from_string(scratch.arena, string_from_cstr_no_limit(settings->title));
@ -2444,6 +2474,21 @@ INTERNAL LRESULT CALLBACK win32_window_proc(HWND hwnd, UINT msg, WPARAM wparam,
window->cursor_set_flags = 0;
}
/* Update always on top */
{
u32 toggles = atomic32_fetch_set(&window->topmost_toggles, 0);
if (toggles % 2 != 0) {
b32 new_topmost = !window->is_topmost;
if (new_topmost) {
SetWindowText(hwnd, L"============================= TOP =============================");
} else {
SetWindowText(hwnd, L"");
}
SetWindowPos(hwnd, new_topmost ? HWND_TOPMOST : HWND_NOTOPMOST, 0, 0, 0, 0, SWP_NOMOVE | SWP_NOSIZE);
window->is_topmost = new_topmost;
}
}
LRESULT result = 0;
b32 is_release = 0;
switch (msg) {
@ -2767,6 +2812,13 @@ void sys_window_cursor_disable_clip(struct sys_window *sys_window)
win32_window_wake(window);
}
/* Request a toggle of the window's always-on-top state from any thread.
 * Increments an atomic toggle counter rather than touching window state
 * directly; the window thread drains the counter (and applies the change
 * via SetWindowPos when the count is odd) inside win32_window_proc.
 * NOTE(review): win32_window_wake presumably posts a message so the window
 * thread processes the toggle promptly — confirm against its definition. */
void sys_window_toggle_topmost(struct sys_window *sys_window)
{
struct win32_window *window = (struct win32_window *)sys_window;
/* Count toggles instead of setting a flag so rapid presses on the caller
 * thread are not lost between window-thread wakeups. */
atomic32_fetch_add(&window->topmost_toggles, 1);
win32_window_wake(window);
}
/* ========================== *
* Address
* ========================== */
@ -3632,12 +3684,13 @@ int CALLBACK wWinMain(_In_ HINSTANCE instance, _In_opt_ HINSTANCE prev_instance,
atomic32_fetch_set(&G.shutdown, 1);
for (enum sys_pool pool_kind = 0; pool_kind < (i32)countof(G.job_pools); ++pool_kind) {
struct job_pool *pool = &G.job_pools[pool_kind];
struct snc_lock lock = snc_lock_e(&pool->workers_wake_mutex);
tm_lock(&pool->workers_wake_lock);
{
atomic32_fetch_set(&pool->workers_shutdown.v, 1);
snc_cv_signal(&pool->workers_wake_cv, I32_MAX);
atomic64_fetch_set(&pool->num_jobs_in_queue.v, -100000);
WakeByAddressAll(&pool->num_jobs_in_queue);
}
snc_unlock(&lock);
tm_unlock(&pool->workers_wake_lock);
}
}

View File

@ -176,6 +176,7 @@ GLOBAL READONLY enum user_bind_kind g_binds[SYS_BTN_COUNT] = {
[SYS_BTN_F1] = USER_BIND_KIND_DEBUG_PAUSE,
[SYS_BTN_F2] = USER_BIND_KIND_DEBUG_CAMERA,
[SYS_BTN_F3] = USER_BIND_KIND_DEBUG_DRAW,
[SYS_BTN_F5] = USER_BIND_KIND_DEBUG_TOGGLE_TOPMOST,
[SYS_BTN_GRAVE_ACCENT] = USER_BIND_KIND_DEBUG_CONSOLE,
[SYS_BTN_ALT] = USER_BIND_KIND_FULLSCREEN_MOD,
[SYS_BTN_ENTER] = USER_BIND_KIND_FULLSCREEN,
@ -814,6 +815,11 @@ INTERNAL void user_update(struct sys_window *window)
G.debug_draw = !G.debug_draw;
}
if (G.bind_states[USER_BIND_KIND_DEBUG_TOGGLE_TOPMOST].num_presses > 0) {
sys_window_toggle_topmost(window);
logf_success("Toggle topmost");
}
if (G.bind_states[USER_BIND_KIND_DEBUG_CONSOLE].num_presses > 0) {
G.debug_console = !G.debug_console;
}

View File

@ -43,6 +43,7 @@ enum user_bind_kind {
USER_BIND_KIND_DEBUG_DELETE,
USER_BIND_KIND_DEBUG_TELEPORT,
USER_BIND_KIND_DEBUG_EXPLODE,
USER_BIND_KIND_DEBUG_TOGGLE_TOPMOST,
USER_BIND_KIND_FULLSCREEN_MOD,
USER_BIND_KIND_FULLSCREEN,
USER_BIND_KIND_ZOOM_IN,