From 4df1418aa5a3665ca70708476740e70fb94ee147 Mon Sep 17 00:00:00 2001 From: jacob Date: Sat, 10 Jan 2026 14:28:45 -0600 Subject: [PATCH] rename G_NumQueues -> G_QueueKind_COUNT, G_MaskFromQueue -> G_QueueMaskFromKind --- src/gpu/gpu_core.h | 8 ++++---- src/gpu/gpu_dx12/gpu_dx12_core.c | 20 ++++++++++---------- src/gpu/gpu_dx12/gpu_dx12_core.h | 2 +- src/pp/pp_sim/pp_sim_core.c | 5 ++--- 4 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/gpu/gpu_core.h b/src/gpu/gpu_core.h index 9214552d..2e912a7a 100644 --- a/src/gpu/gpu_core.h +++ b/src/gpu/gpu_core.h @@ -26,7 +26,7 @@ Enum(G_QueueKind) G_QueueKind_AsyncCompute = G_QueueKind_Direct, G_QueueKind_AsyncCopy = G_QueueKind_Direct, #endif - G_NumQueues + G_QueueKind_COUNT }; Enum(G_QueueMask) @@ -40,13 +40,13 @@ Enum(G_QueueMask) G_QueueMask_AsyncCompute = G_QueueMask_Direct, G_QueueMask_AsyncCopy = G_QueueMask_Direct, #endif - G_QueueMask_All = (0xFFFFFFFF >> (32 - G_NumQueues)) + G_QueueMask_All = (0xFFFFFFFF >> (32 - G_QueueKind_COUNT)) }; -#define G_MaskFromQueue(queue_kind) (1 << queue_kind) +#define G_QueueMaskFromKind(queue_kind) (1 << queue_kind) Struct(G_QueueCompletions) { - i64 v[G_NumQueues]; // Array of completions indexed by queue kind + i64 v[G_QueueKind_COUNT]; // Array of completions indexed by queue kind }; // All waiters will wait until specified queues reach their value in the `completions` array diff --git a/src/gpu/gpu_dx12/gpu_dx12_core.c b/src/gpu/gpu_dx12/gpu_dx12_core.c index 4ba6b8df..6a0640dc 100644 --- a/src/gpu/gpu_dx12/gpu_dx12_core.c +++ b/src/gpu/gpu_dx12/gpu_dx12_core.c @@ -292,7 +292,7 @@ void G_Bootstrap(void) // Create debug print buffers if (GPU_SHADER_PRINT) { - for (G_QueueKind queue_kind = 0; queue_kind < G_NumQueues; ++queue_kind) + for (G_QueueKind queue_kind = 0; queue_kind < G_QueueKind_COUNT; ++queue_kind) { G_D12_Queue *queue = G_D12_QueueFromKind(queue_kind); if (queue_kind != G_QueueKind_AsyncCopy) @@ -324,7 +324,7 @@ void G_Bootstrap(void) ////////////////////////////// //- Start workers - // for (G_QueueKind kind = 0; 
kind < G_NumQueues; ++kind) + // for (G_QueueKind kind = 0; kind < G_QueueKind_COUNT; ++kind) // { // String name = Zi; // if (kind == G_QueueKind_Direct) name = Lit("Gpu direct queue worker"); @@ -2734,7 +2734,7 @@ i64 G_CompletionTargetFromQueue(G_QueueKind queue_kind) G_QueueCompletions G_CompletionValuesFromQueues(G_QueueMask queue_mask) { G_QueueCompletions completions = Zi; - for (G_QueueKind queue_kind = 0; queue_kind < G_NumQueues; ++queue_kind) + for (G_QueueKind queue_kind = 0; queue_kind < G_QueueKind_COUNT; ++queue_kind) { if (queue_mask & (1 << queue_kind)) { @@ -2747,7 +2747,7 @@ G_QueueCompletions G_CompletionValuesFromQueues(G_QueueMask queue_mask) G_QueueCompletions G_CompletionTargetsFromQueues(G_QueueMask queue_mask) { G_QueueCompletions completions = Zi; - for (G_QueueKind queue_kind = 0; queue_kind < G_NumQueues; ++queue_kind) + for (G_QueueKind queue_kind = 0; queue_kind < G_QueueKind_COUNT; ++queue_kind) { if (queue_mask & (1 << queue_kind)) { @@ -2761,11 +2761,11 @@ void G_SyncEx(G_QueueBarrierDesc desc) { u64 fences_count = 0; - ID3D12Fence *fences[G_NumQueues] = Zi; - i64 fence_targets[G_NumQueues] = Zi; + ID3D12Fence *fences[G_QueueKind_COUNT] = Zi; + i64 fence_targets[G_QueueKind_COUNT] = Zi; // Grab fences - for (G_QueueKind completion_queue_kind = 0; completion_queue_kind < G_NumQueues; ++ completion_queue_kind) + for (G_QueueKind completion_queue_kind = 0; completion_queue_kind < G_QueueKind_COUNT; ++ completion_queue_kind) { G_D12_Queue *completion_queue = G_D12_QueueFromKind(completion_queue_kind); i64 target = desc.completions.v[completion_queue_kind]; @@ -2782,7 +2782,7 @@ void G_SyncEx(G_QueueBarrierDesc desc) } // Sync Queues - for (G_QueueKind waiter_queue_kind = 0; waiter_queue_kind < G_NumQueues; ++ waiter_queue_kind) + for (G_QueueKind waiter_queue_kind = 0; waiter_queue_kind < G_QueueKind_COUNT; ++ waiter_queue_kind) { if (desc.wait_queues & (1 << waiter_queue_kind)) { @@ -3073,7 +3073,7 @@ void 
G_D12_CollectionWorkerEntryPoint(WaveLaneCtx *lane) SleepSeconds(0.100); // Copy print-buffers to readback - for (G_QueueKind queue_kind = 0; queue_kind < G_NumQueues; ++queue_kind) + for (G_QueueKind queue_kind = 0; queue_kind < G_QueueKind_COUNT; ++queue_kind) { G_D12_Queue *queue = G_D12_QueueFromKind(queue_kind); if (!G_IsResourceNil(queue->print_buffer)) @@ -3098,7 +3098,7 @@ void G_D12_CollectionWorkerEntryPoint(WaveLaneCtx *lane) // TODO: Collect asynchronously G_SyncCpu(G_QueueMask_Direct | G_QueueMask_AsyncCompute); - for (G_QueueKind queue_kind = 0; queue_kind < G_NumQueues; ++queue_kind) + for (G_QueueKind queue_kind = 0; queue_kind < G_QueueKind_COUNT; ++queue_kind) { G_D12_Queue *queue = G_D12_QueueFromKind(queue_kind); if (!G_IsResourceNil(queue->print_buffer)) diff --git a/src/gpu/gpu_dx12/gpu_dx12_core.h b/src/gpu/gpu_dx12/gpu_dx12_core.h index 5eb2e8ae..21011813 100644 --- a/src/gpu/gpu_dx12/gpu_dx12_core.h +++ b/src/gpu/gpu_dx12/gpu_dx12_core.h @@ -434,7 +434,7 @@ Struct(G_D12_Ctx) Atomic64 driver_descriptors_allocated; // Queues - G_D12_Queue queues[G_NumQueues]; + G_D12_Queue queues[G_QueueKind_COUNT]; // Descriptor heaps G_D12_DescriptorHeap descriptor_heaps[G_D12_DescriptorHeapKind_COUNT]; diff --git a/src/pp/pp_sim/pp_sim_core.c b/src/pp/pp_sim/pp_sim_core.c index 5e20b2c6..6869e82a 100644 --- a/src/pp/pp_sim/pp_sim_core.c +++ b/src/pp/pp_sim/pp_sim_core.c @@ -259,8 +259,8 @@ S_Shape S_WorldShapeFromEnt(S_Ent *ent) //////////////////////////////////////////////////////////// //~ Collision -// NOTE: Everything here is pretty much copied directly from the old physics -// prototype. It's slow and does more than what we need. For example we should +// NOTE: Everything here is pretty much copied directly from the old prototype. +// The techniques are slow and do more than what we need. For example we should // probably just switch from GJK to SAT for shape collision testing. 
S_SupportPoint S_SupportPointFromShapeEx(S_Shape shape, Vec2 dir, i32 ignore_idx) @@ -1872,7 +1872,6 @@ void S_TickForever(WaveLaneCtx *lane) } } - ////////////////////////////// //- Run solver steps