diff --git a/src/gpu/gpu.lay b/src/gpu/gpu.lay index afd4c075..a2072ffb 100644 --- a/src/gpu/gpu.lay +++ b/src/gpu/gpu.lay @@ -8,7 +8,7 @@ ////////////////////////////// //- Resources -@EmbedDir GPU_Resources gpu_res +@EmbedDir G_Resources gpu_res ////////////////////////////// //- Api @@ -19,8 +19,8 @@ @IncludeG gpu_shader_core.cgh -@Bootstrap GPU_Bootstrap -@Bootstrap GPU_BootstrapExtra +@Bootstrap G_Bootstrap +@Bootstrap G_BootstrapExtra ////////////////////////////// //- Impl diff --git a/src/gpu/gpu_common.c b/src/gpu/gpu_common.c index 3b5998ad..49dd2695 100644 --- a/src/gpu/gpu_common.c +++ b/src/gpu/gpu_common.c @@ -1,24 +1,24 @@ -GPU_SharedUtilState GPU_shared_util_state = ZI; -ThreadLocal GPU_ArenaHandle GPU_t_perm_arena = ZI; +G_SharedUtilState G_shared_util_state = ZI; +ThreadLocal G_ArenaHandle G_t_perm_arena = ZI; //////////////////////////////////////////////////////////// //~ Bootstrap -void GPU_BootstrapExtra(void) +void G_BootstrapExtra(void) { - GPU_SharedUtilState *g = &GPU_shared_util_state; + G_SharedUtilState *g = &G_shared_util_state; - GPU_ArenaHandle gpu_perm = GPU_PermArena(); + G_ArenaHandle gpu_perm = G_PermArena(); - GPU_CommandListHandle cl = GPU_PrepareCommandList(GPU_QueueKind_Direct); + G_CommandListHandle cl = G_PrepareCommandList(G_QueueKind_Direct); { /* Init quad index buffer */ { - GPU_ResourceHandle quad_indices = ZI; + G_ResourceHandle quad_indices = ZI; u16 quad_data[6] = { 0, 1, 2, 0, 2, 3 }; - quad_indices = GPU_PushBuffer(gpu_perm, u16, countof(quad_data)); - GPU_CopyCpuToBuffer(cl, quad_indices, 0, quad_data, RNGU64(0, sizeof(quad_data))); - g->quad_indices = GPU_IdxBuff16(quad_indices); + quad_indices = G_PushBuffer(gpu_perm, u16, countof(quad_data)); + G_CopyCpuToBuffer(cl, quad_indices, 0, quad_data, RNGU64(0, sizeof(quad_data))); + g->quad_indices = G_IdxBuff16(quad_indices); } /* TODO: Init debug print queues */ @@ -27,35 +27,35 @@ void GPU_BootstrapExtra(void) /* Init point sampler */ { - GPU_ResourceHandle 
pt_sampler = GPU_PushSamplerResource(gpu_perm, (GPU_SamplerResourceDesc) { .filter = GPU_Filter_MinMagMipPoint }); - GPU_PushSamplerStateRef(gpu_perm, pt_sampler, .forced = GPU_BasicPointSampler.v); + G_ResourceHandle pt_sampler = G_PushSamplerResource(gpu_perm, (G_SamplerResourceDesc) { .filter = G_Filter_MinMagMipPoint }); + G_PushSamplerStateRef(gpu_perm, pt_sampler, .forced = G_BasicPointSampler.v); } /* Init noise texture */ { - GPU_ResourceHandle noise_tex = ZI; - String noise_data = DataFromResource(ResourceKeyFromStore(&GPU_Resources, Lit("noise_128x128x64_16.dat"))); + G_ResourceHandle noise_tex = ZI; + String noise_data = DataFromResource(ResourceKeyFromStore(&G_Resources, Lit("noise_128x128x64_16.dat"))); Vec3I32 noise_dims = VEC3I32(128, 128, 64); if (noise_data.len != noise_dims.x * noise_dims.y * noise_dims.z * 2) { Panic(Lit("Unexpected noise texture size")); } - noise_tex = GPU_PushTexture3D(gpu_perm, - GPU_Format_R16_Uint, + noise_tex = G_PushTexture3D(gpu_perm, + G_Format_R16_Uint, noise_dims, - GPU_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present); - GPU_CopyCpuToTexture(cl, + G_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present); + G_CopyCpuToTexture(cl, noise_tex, VEC3I32(0, 0, 0), noise_data.text, noise_dims, RNG3I32(VEC3I32(0, 0, 0), noise_dims)); - GPU_PushTexture3DRef(gpu_perm, noise_tex, .forced = GPU_BasicNoiseTexture.v); + G_PushTexture3DRef(gpu_perm, noise_tex, .forced = G_BasicNoiseTexture.v); } } - GPU_CommitCommandList(cl); + G_CommitCommandList(cl); - GPU_SyncOtherQueues(GPU_QueueKind_Direct); + G_SyncOtherQueues(G_QueueKind_Direct); } //////////////////////////////////////////////////////////// @@ -63,48 +63,48 @@ void GPU_BootstrapExtra(void) //- Arena -GPU_ArenaHandle GPU_PermArena(void) +G_ArenaHandle G_PermArena(void) { - GPU_ArenaHandle perm = GPU_t_perm_arena; - if (GPU_IsArenaNil(perm)) + G_ArenaHandle perm = G_t_perm_arena; + if (G_IsArenaNil(perm)) { - GPU_t_perm_arena = GPU_AcquireArena(); - perm = 
GPU_t_perm_arena; + G_t_perm_arena = G_AcquireArena(); + perm = G_t_perm_arena; } return perm; } //- Cpu -> Gpu copy -GPU_ResourceHandle GPU_PushBufferFromCpu(GPU_ArenaHandle gpu_arena, GPU_CommandListHandle cl, String src) +G_ResourceHandle G_PushBufferFromCpu(G_ArenaHandle gpu_arena, G_CommandListHandle cl, String src) { - GPU_ResourceHandle buffer = GPU_PushBufferResource(gpu_arena, (GPU_BufferResourceDesc) { .size = src.len }); - GPU_CopyCpuToBuffer(cl, buffer, 0, src.text, RNGU64(0, src.len)); - GPU_MemorySync( + G_ResourceHandle buffer = G_PushBufferResource(gpu_arena, (G_BufferResourceDesc) { .size = src.len }); + G_CopyCpuToBuffer(cl, buffer, 0, src.text, RNGU64(0, src.len)); + G_MemorySync( cl, buffer, - GPU_Stage_Copy, GPU_Access_CopyWrite, - GPU_Stage_All, GPU_Access_All + G_Stage_Copy, G_Access_CopyWrite, + G_Stage_All, G_Access_All ); return buffer; } //- Viewport / scissor -Rng3 GPU_ViewportFromTexture(GPU_ResourceHandle texture) +Rng3 G_ViewportFromTexture(G_ResourceHandle texture) { - Vec2I32 dims = GPU_Count2D(texture); + Vec2I32 dims = G_Count2D(texture); return RNG3(VEC3(0, 0, 0), VEC3(dims.x, dims.y, 1)); } -Rng2 GPU_ScissorFromTexture(GPU_ResourceHandle texture) +Rng2 G_ScissorFromTexture(G_ResourceHandle texture) { - Vec2I32 dims = GPU_Count2D(texture); + Vec2I32 dims = G_Count2D(texture); return RNG2(VEC2(0, 0), VEC2(dims.x, dims.y)); } //- Shared resources -GPU_IndexBufferDesc GPU_GetSharedQuadIndices(void) +G_IndexBufferDesc G_GetSharedQuadIndices(void) { - return GPU_shared_util_state.quad_indices; + return G_shared_util_state.quad_indices; } diff --git a/src/gpu/gpu_common.h b/src/gpu/gpu_common.h index 99ed4a89..ef82ad65 100644 --- a/src/gpu/gpu_common.h +++ b/src/gpu/gpu_common.h @@ -1,35 +1,35 @@ //////////////////////////////////////////////////////////// //~ State types -Struct(GPU_SharedUtilState) +Struct(G_SharedUtilState) { /* Common shared resources */ - GPU_IndexBufferDesc quad_indices; -} extern GPU_shared_util_state; + 
G_IndexBufferDesc quad_indices; +} extern G_shared_util_state; -extern ThreadLocal GPU_ArenaHandle GPU_t_perm_arena; +extern ThreadLocal G_ArenaHandle G_t_perm_arena; //////////////////////////////////////////////////////////// //~ Bootstrap -void GPU_BootstrapExtra(void); +void G_BootstrapExtra(void); //////////////////////////////////////////////////////////// //~ Helpers //- Arena -GPU_ArenaHandle GPU_PermArena(void); +G_ArenaHandle G_PermArena(void); //- Cpu -> Gpu copy -GPU_ResourceHandle GPU_PushBufferFromCpu(GPU_ArenaHandle gpu_arena, GPU_CommandListHandle cl, String src); +G_ResourceHandle G_PushBufferFromCpu(G_ArenaHandle gpu_arena, G_CommandListHandle cl, String src); //- Viewport / scissor -Rng3 GPU_ViewportFromTexture(GPU_ResourceHandle texture); -Rng2 GPU_ScissorFromTexture(GPU_ResourceHandle texture); +Rng3 G_ViewportFromTexture(G_ResourceHandle texture); +Rng2 G_ScissorFromTexture(G_ResourceHandle texture); //- Shared resources -GPU_IndexBufferDesc GPU_GetSharedQuadIndices(void); +G_IndexBufferDesc G_GetSharedQuadIndices(void); diff --git a/src/gpu/gpu_core.h b/src/gpu/gpu_core.h index a8c7b308..deecec6f 100644 --- a/src/gpu/gpu_core.h +++ b/src/gpu/gpu_core.h @@ -1,33 +1,33 @@ //////////////////////////////////////////////////////////// //~ Handle types -Struct(GPU_ArenaHandle) { u64 v; }; -Struct(GPU_CommandListHandle) { u64 v; }; -Struct(GPU_ResourceHandle) { u64 v; }; -Struct(GPU_SwapchainHandle) { u64 v; }; +Struct(G_ArenaHandle) { u64 v; }; +Struct(G_CommandListHandle) { u64 v; }; +Struct(G_ResourceHandle) { u64 v; }; +Struct(G_SwapchainHandle) { u64 v; }; -#define GPU_IsArenaNil(h) ((h).v == 0) -#define GPU_IsCommandListNil(h) ((h).v == 0) -#define GPU_IsResourceNil(h) ((h).v == 0) -#define GPU_IsSwapchainNil(h) ((h).v == 0) +#define G_IsArenaNil(h) ((h).v == 0) +#define G_IsCommandListNil(h) ((h).v == 0) +#define G_IsResourceNil(h) ((h).v == 0) +#define G_IsSwapchainNil(h) ((h).v == 0) 
//////////////////////////////////////////////////////////// //~ Queue types -#define GPU_IsMultiQueueEnabled 1 +#define G_IsMultiQueueEnabled 1 -Enum(GPU_QueueKind) +Enum(G_QueueKind) { -#if GPU_IsMultiQueueEnabled - GPU_QueueKind_Direct = 0, - GPU_QueueKind_AsyncCompute = 1, - GPU_QueueKind_AsyncCopy = 2, - GPU_NumQueues = 3 +#if G_IsMultiQueueEnabled + G_QueueKind_Direct = 0, + G_QueueKind_AsyncCompute = 1, + G_QueueKind_AsyncCopy = 2, + G_NumQueues = 3 #else - GPU_QueueKind_Direct = 0, - GPU_QueueKind_AsyncCompute = 0, - GPU_QueueKind_AsyncCopy = 0, - GPU_NumQueues = 1 + G_QueueKind_Direct = 0, + G_QueueKind_AsyncCompute = 0, + G_QueueKind_AsyncCopy = 0, + G_NumQueues = 1 #endif }; @@ -35,193 +35,193 @@ Enum(GPU_QueueKind) //~ Format types /* NOTE: Matches DirectX DXGI_FORMAT */ -Enum(GPU_Format) +Enum(G_Format) { - GPU_Format_Unknown = 0, - GPU_Format_R32G32B32A32_Typeless = 1, - GPU_Format_R32G32B32A32_Float = 2, - GPU_Format_R32G32B32A32_Uint = 3, - GPU_Format_R32G32B32A32_Sint = 4, - GPU_Format_R32G32B32_Typeless = 5, - GPU_Format_R32G32B32_Float = 6, - GPU_Format_R32G32B32_Uint = 7, - GPU_Format_R32G32B32_Sint = 8, - GPU_Format_R16G16B16A16_Typeless = 9, - GPU_Format_R16G16B16A16_Float = 10, - GPU_Format_R16G16B16A16_Unorm = 11, - GPU_Format_R16G16B16A16_Uint = 12, - GPU_Format_R16G16B16A16_Snorm = 13, - GPU_Format_R16G16B16A16_Sint = 14, - GPU_Format_R32G32_Typeless = 15, - GPU_Format_R32G32_Float = 16, - GPU_Format_R32G32_Uint = 17, - GPU_Format_R32G32_Sint = 18, - GPU_Format_R32G8X24_Typeless = 19, - GPU_Format_D32_Float_S8X24_Uint = 20, - GPU_Format_R32_Float_X8X24_Typeless = 21, - GPU_Format_X32_Typeless_G8X24_Uint = 22, - GPU_Format_R10G10B10A2_Typeless = 23, - GPU_Format_R10G10B10A2_Unorm = 24, - GPU_Format_R10G10B10A2_Uint = 25, - GPU_Format_R11G11B10_Float = 26, - GPU_Format_R8G8B8A8_Typeless = 27, - GPU_Format_R8G8B8A8_Unorm = 28, - GPU_Format_R8G8B8A8_Unorm_Srgb = 29, - GPU_Format_R8G8B8A8_Uint = 30, - GPU_Format_R8G8B8A8_Snorm = 31, - 
GPU_Format_R8G8B8A8_Sint = 32, - GPU_Format_R16G16_Typeless = 33, - GPU_Format_R16G16_Float = 34, - GPU_Format_R16G16_Unorm = 35, - GPU_Format_R16G16_Uint = 36, - GPU_Format_R16G16_Snorm = 37, - GPU_Format_R16G16_Sint = 38, - GPU_Format_R32_Typeless = 39, - GPU_Format_D32_Float = 40, - GPU_Format_R32_Float = 41, - GPU_Format_R32_Uint = 42, - GPU_Format_R32_Sint = 43, - GPU_Format_R24G8_Typeless = 44, - GPU_Format_D24_Unorm_S8_Uint = 45, - GPU_Format_R24_Unorm_X8_Typeless = 46, - GPU_Format_X24_Typeless_G8_Uint = 47, - GPU_Format_R8G8_Typeless = 48, - GPU_Format_R8G8_Unorm = 49, - GPU_Format_R8G8_Uint = 50, - GPU_Format_R8G8_Snorm = 51, - GPU_Format_R8G8_Sint = 52, - GPU_Format_R16_Typeless = 53, - GPU_Format_R16_Float = 54, - GPU_Format_D16_Unorm = 55, - GPU_Format_R16_Unorm = 56, - GPU_Format_R16_Uint = 57, - GPU_Format_R16_Snorm = 58, - GPU_Format_R16_Sint = 59, - GPU_Format_R8_Typeless = 60, - GPU_Format_R8_Unorm = 61, - GPU_Format_R8_Uint = 62, - GPU_Format_R8_Snorm = 63, - GPU_Format_R8_Sint = 64, - GPU_Format_A8_Unorm = 65, - GPU_Format_R1_Unorm = 66, - GPU_Format_R9G9B9E5_SharedXP = 67, - GPU_Format_R8G8_B8G8_Unorm = 68, - GPU_Format_G8R8_G8B8_Unorm = 69, - GPU_Format_BC1_Typeless = 70, - GPU_Format_BC1_Unorm = 71, - GPU_Format_BC1_Unorm_Srgb = 72, - GPU_Format_BC2_Typeless = 73, - GPU_Format_BC2_Unorm = 74, - GPU_Format_BC2_Unorm_Srgb = 75, - GPU_Format_BC3_Typeless = 76, - GPU_Format_BC3_Unorm = 77, - GPU_Format_BC3_Unorm_Srgb = 78, - GPU_Format_BC4_Typeless = 79, - GPU_Format_BC4_Unorm = 80, - GPU_Format_BC4_Snorm = 81, - GPU_Format_BC5_Typeless = 82, - GPU_Format_BC5_Unorm = 83, - GPU_Format_BC5_Snorm = 84, - GPU_Format_B5G6R5_Unorm = 85, - GPU_Format_B5G5R5A1_Unorm = 86, - GPU_Format_B8G8R8A8_Unorm = 87, - GPU_Format_B8G8R8X8_Unorm = 88, - GPU_Format_R10G10B10_XR_BIAS_A2_Unorm = 89, - GPU_Format_B8G8R8A8_Typeless = 90, - GPU_Format_B8G8R8A8_Unorm_Srgb = 91, - GPU_Format_B8G8R8X8_Typeless = 92, - GPU_Format_B8G8R8X8_Unorm_Srgb = 93, - 
GPU_Format_BC6H_Typeless = 94, - GPU_Format_BC6H_UF16 = 95, - GPU_Format_BC6H_SF16 = 96, - GPU_Format_BC7_Typeless = 97, - GPU_Format_BC7_Unorm = 98, - GPU_Format_BC7_Unorm_Srgb = 99, - GPU_Format_AYUV = 100, - GPU_Format_Y410 = 101, - GPU_Format_Y416 = 102, - GPU_Format_NV12 = 103, - GPU_Format_P010 = 104, - GPU_Format_P016 = 105, - GPU_Format_420_Opaque = 106, - GPU_Format_YUY2 = 107, - GPU_Format_Y210 = 108, - GPU_Format_Y216 = 109, - GPU_Format_NV11 = 110, - GPU_Format_AI44 = 111, - GPU_Format_IA44 = 112, - GPU_Format_P8 = 113, - GPU_Format_A8P8 = 114, - GPU_Format_B4G4R4A4_Unorm = 115, - GPU_Format_P208 = 130, - GPU_Format_V208 = 131, - GPU_Format_V408 = 132, - GPU_Format_SamplerFeedbackMinMipOpaque = 189, - GPU_Format_SamplerFeedbackMipRegionUsedOpaque = 190, - GPU_Format_A4B4G4R4_Unorm = 191, - GPU_Format_Count = 192 + G_Format_Unknown = 0, + G_Format_R32G32B32A32_Typeless = 1, + G_Format_R32G32B32A32_Float = 2, + G_Format_R32G32B32A32_Uint = 3, + G_Format_R32G32B32A32_Sint = 4, + G_Format_R32G32B32_Typeless = 5, + G_Format_R32G32B32_Float = 6, + G_Format_R32G32B32_Uint = 7, + G_Format_R32G32B32_Sint = 8, + G_Format_R16G16B16A16_Typeless = 9, + G_Format_R16G16B16A16_Float = 10, + G_Format_R16G16B16A16_Unorm = 11, + G_Format_R16G16B16A16_Uint = 12, + G_Format_R16G16B16A16_Snorm = 13, + G_Format_R16G16B16A16_Sint = 14, + G_Format_R32G32_Typeless = 15, + G_Format_R32G32_Float = 16, + G_Format_R32G32_Uint = 17, + G_Format_R32G32_Sint = 18, + G_Format_R32G8X24_Typeless = 19, + G_Format_D32_Float_S8X24_Uint = 20, + G_Format_R32_Float_X8X24_Typeless = 21, + G_Format_X32_Typeless_G8X24_Uint = 22, + G_Format_R10G10B10A2_Typeless = 23, + G_Format_R10G10B10A2_Unorm = 24, + G_Format_R10G10B10A2_Uint = 25, + G_Format_R11G11B10_Float = 26, + G_Format_R8G8B8A8_Typeless = 27, + G_Format_R8G8B8A8_Unorm = 28, + G_Format_R8G8B8A8_Unorm_Srgb = 29, + G_Format_R8G8B8A8_Uint = 30, + G_Format_R8G8B8A8_Snorm = 31, + G_Format_R8G8B8A8_Sint = 32, + G_Format_R16G16_Typeless = 33, + 
G_Format_R16G16_Float = 34, + G_Format_R16G16_Unorm = 35, + G_Format_R16G16_Uint = 36, + G_Format_R16G16_Snorm = 37, + G_Format_R16G16_Sint = 38, + G_Format_R32_Typeless = 39, + G_Format_D32_Float = 40, + G_Format_R32_Float = 41, + G_Format_R32_Uint = 42, + G_Format_R32_Sint = 43, + G_Format_R24G8_Typeless = 44, + G_Format_D24_Unorm_S8_Uint = 45, + G_Format_R24_Unorm_X8_Typeless = 46, + G_Format_X24_Typeless_G8_Uint = 47, + G_Format_R8G8_Typeless = 48, + G_Format_R8G8_Unorm = 49, + G_Format_R8G8_Uint = 50, + G_Format_R8G8_Snorm = 51, + G_Format_R8G8_Sint = 52, + G_Format_R16_Typeless = 53, + G_Format_R16_Float = 54, + G_Format_D16_Unorm = 55, + G_Format_R16_Unorm = 56, + G_Format_R16_Uint = 57, + G_Format_R16_Snorm = 58, + G_Format_R16_Sint = 59, + G_Format_R8_Typeless = 60, + G_Format_R8_Unorm = 61, + G_Format_R8_Uint = 62, + G_Format_R8_Snorm = 63, + G_Format_R8_Sint = 64, + G_Format_A8_Unorm = 65, + G_Format_R1_Unorm = 66, + G_Format_R9G9B9E5_SharedXP = 67, + G_Format_R8G8_B8G8_Unorm = 68, + G_Format_G8R8_G8B8_Unorm = 69, + G_Format_BC1_Typeless = 70, + G_Format_BC1_Unorm = 71, + G_Format_BC1_Unorm_Srgb = 72, + G_Format_BC2_Typeless = 73, + G_Format_BC2_Unorm = 74, + G_Format_BC2_Unorm_Srgb = 75, + G_Format_BC3_Typeless = 76, + G_Format_BC3_Unorm = 77, + G_Format_BC3_Unorm_Srgb = 78, + G_Format_BC4_Typeless = 79, + G_Format_BC4_Unorm = 80, + G_Format_BC4_Snorm = 81, + G_Format_BC5_Typeless = 82, + G_Format_BC5_Unorm = 83, + G_Format_BC5_Snorm = 84, + G_Format_B5G6R5_Unorm = 85, + G_Format_B5G5R5A1_Unorm = 86, + G_Format_B8G8R8A8_Unorm = 87, + G_Format_B8G8R8X8_Unorm = 88, + G_Format_R10G10B10_XR_BIAS_A2_Unorm = 89, + G_Format_B8G8R8A8_Typeless = 90, + G_Format_B8G8R8A8_Unorm_Srgb = 91, + G_Format_B8G8R8X8_Typeless = 92, + G_Format_B8G8R8X8_Unorm_Srgb = 93, + G_Format_BC6H_Typeless = 94, + G_Format_BC6H_UF16 = 95, + G_Format_BC6H_SF16 = 96, + G_Format_BC7_Typeless = 97, + G_Format_BC7_Unorm = 98, + G_Format_BC7_Unorm_Srgb = 99, + G_Format_AYUV = 100, + 
G_Format_Y410 = 101, + G_Format_Y416 = 102, + G_Format_NV12 = 103, + G_Format_P010 = 104, + G_Format_P016 = 105, + G_Format_420_Opaque = 106, + G_Format_YUY2 = 107, + G_Format_Y210 = 108, + G_Format_Y216 = 109, + G_Format_NV11 = 110, + G_Format_AI44 = 111, + G_Format_IA44 = 112, + G_Format_P8 = 113, + G_Format_A8P8 = 114, + G_Format_B4G4R4A4_Unorm = 115, + G_Format_P208 = 130, + G_Format_V208 = 131, + G_Format_V408 = 132, + G_Format_SamplerFeedbackMinMipOpaque = 189, + G_Format_SamplerFeedbackMipRegionUsedOpaque = 190, + G_Format_A4B4G4R4_Unorm = 191, + G_Format_Count = 192 }; //////////////////////////////////////////////////////////// //~ Barrier types -Enum(GPU_Stage) +Enum(G_Stage) { - GPU_Stage_None = 0, + G_Stage_None = 0, /* Compute stages */ - GPU_Stage_ComputeShading = (1 << 1), + G_Stage_ComputeShading = (1 << 1), /* Draw stages */ - GPU_Stage_IndexAssembly = (1 << 2), - GPU_Stage_VertexShading = (1 << 3), - GPU_Stage_PixelShading = (1 << 4), - GPU_Stage_DepthStencil = (1 << 5), - GPU_Stage_RenderTarget = (1 << 6), + G_Stage_IndexAssembly = (1 << 2), + G_Stage_VertexShading = (1 << 3), + G_Stage_PixelShading = (1 << 4), + G_Stage_DepthStencil = (1 << 5), + G_Stage_RenderTarget = (1 << 6), /* Copy stages */ - GPU_Stage_Copy = (1 << 7), + G_Stage_Copy = (1 << 7), /* Indirect stages */ - GPU_Stage_Indirect = (1 << 8), + G_Stage_Indirect = (1 << 8), /* Aggregate stages */ - GPU_Stage_AllDraw = GPU_Stage_IndexAssembly | - GPU_Stage_VertexShading | - GPU_Stage_PixelShading | - GPU_Stage_DepthStencil | - GPU_Stage_RenderTarget, + G_Stage_AllDraw = G_Stage_IndexAssembly | + G_Stage_VertexShading | + G_Stage_PixelShading | + G_Stage_DepthStencil | + G_Stage_RenderTarget, - GPU_Stage_AllShading = GPU_Stage_ComputeShading | - GPU_Stage_VertexShading | - GPU_Stage_PixelShading, + G_Stage_AllShading = G_Stage_ComputeShading | + G_Stage_VertexShading | + G_Stage_PixelShading, - GPU_Stage_All = 0xFFFFFFFF + G_Stage_All = 0xFFFFFFFF }; -Enum(GPU_Access) +Enum(G_Access) { 
- GPU_Access_None = 0, + G_Access_None = 0, - GPU_Access_ShaderReadWrite = (1 << 1), - GPU_Access_ShaderRead = (1 << 2), + G_Access_ShaderReadWrite = (1 << 1), + G_Access_ShaderRead = (1 << 2), - GPU_Access_CopyWrite = (1 << 3), - GPU_Access_CopyRead = (1 << 4), + G_Access_CopyWrite = (1 << 3), + G_Access_CopyRead = (1 << 4), - GPU_Access_DepthStencilRead = (1 << 5), - GPU_Access_DepthStencilWrite = (1 << 6), - GPU_Access_RenderTargetWrite = (1 << 7), + G_Access_DepthStencilRead = (1 << 5), + G_Access_DepthStencilWrite = (1 << 6), + G_Access_RenderTargetWrite = (1 << 7), - GPU_Access_IndexBuffer = (1 << 8), - GPU_Access_IndirectArgument = (1 << 9), + G_Access_IndexBuffer = (1 << 8), + G_Access_IndirectArgument = (1 << 9), - GPU_Access_All = 0xFFFFFFFF + G_Access_All = 0xFFFFFFFF }; -Enum(GPU_Layout) +Enum(G_Layout) { - GPU_Layout_NoChange, + G_Layout_NoChange, /* Allows a resource to be used on any queue with any access type, as long * as there is only one writer at a time, and the writer is not writing to @@ -230,47 +230,47 @@ Enum(GPU_Layout) * Resources cannot transition to/from this layout. They must be created * with it and are locked to it. 
*/ - GPU_Layout_Simultaneous, /* D3D12_BARRIER_LAYOUT_COMMON + D3D12_RESOURCE_FLAG_ALLOW_SIMULTANEOUS_ACCESS */ + G_Layout_Simultaneous, /* D3D12_BARRIER_LAYOUT_COMMON + D3D12_RESOURCE_FLAG_ALLOW_SIMULTANEOUS_ACCESS */ - GPU_Layout_Undefined, /* D3D12_BARRIER_LAYOUT_UNDEFINED */ + G_Layout_Undefined, /* D3D12_BARRIER_LAYOUT_UNDEFINED */ ////////////////////////////// //- Queue-agnostic - GPU_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present, /* D3D12_BARRIER_LAYOUT_COMMON */ + G_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present, /* D3D12_BARRIER_LAYOUT_COMMON */ ////////////////////////////// //- Direct & Compute queue - GPU_Layout_DirectComputeQueue_ShaderRead_CopyRead, /* D3D12_BARRIER_LAYOUT_GENERIC_READ */ + G_Layout_DirectComputeQueue_ShaderRead_CopyRead, /* D3D12_BARRIER_LAYOUT_GENERIC_READ */ - GPU_Layout_DirectComputeQueue_ShaderReadWrite, /* D3D12_BARRIER_LAYOUT_UNORDERED_ACCESS */ - GPU_Layout_DirectComputeQueue_ShaderRead, /* D3D12_BARRIER_LAYOUT_SHADER_RESOURCE */ - GPU_Layout_DirectComputeQueue_CopyRead, /* D3D12_BARRIER_LAYOUT_COPY_SOURCE */ + G_Layout_DirectComputeQueue_ShaderReadWrite, /* D3D12_BARRIER_LAYOUT_UNORDERED_ACCESS */ + G_Layout_DirectComputeQueue_ShaderRead, /* D3D12_BARRIER_LAYOUT_SHADER_RESOURCE */ + G_Layout_DirectComputeQueue_CopyRead, /* D3D12_BARRIER_LAYOUT_COPY_SOURCE */ ////////////////////////////// //- Direct queue - GPU_Layout_DirectQueue_ShaderRead_ShaderReadWrite_CopyRead_CopyWrite, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_COMMON */ - GPU_Layout_DirectQueue_ShaderRead_CopyRead_DepthStencilRead, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_GENERIC_READ */ + G_Layout_DirectQueue_ShaderRead_ShaderReadWrite_CopyRead_CopyWrite, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_COMMON */ + G_Layout_DirectQueue_ShaderRead_CopyRead_DepthStencilRead, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_GENERIC_READ */ - GPU_Layout_DirectQueue_ShaderReadWrite, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_UNORDERED_ACCESS */ - GPU_Layout_DirectQueue_ShaderRead, /* 
D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_SHADER_RESOURCE */ - GPU_Layout_DirectQueue_CopyRead, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_COPY_SOURCE */ + G_Layout_DirectQueue_ShaderReadWrite, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_UNORDERED_ACCESS */ + G_Layout_DirectQueue_ShaderRead, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_SHADER_RESOURCE */ + G_Layout_DirectQueue_CopyRead, /* D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_COPY_SOURCE */ - GPU_Layout_DirectQueue_DepthStencilRead_DepthStencilWrite, /* D3D12_BARRIER_LAYOUT_DEPTH_STENCIL_WRITE */ - GPU_Layout_DirectQueue_DepthStencilRead, /* D3D12_BARRIER_LAYOUT_DEPTH_STENCIL_READ */ - GPU_Layout_DirectQueue_RenderTargetWrite, /* D3D12_BARRIER_LAYOUT_RENDER_TARGET */ + G_Layout_DirectQueue_DepthStencilRead_DepthStencilWrite, /* D3D12_BARRIER_LAYOUT_DEPTH_STENCIL_WRITE */ + G_Layout_DirectQueue_DepthStencilRead, /* D3D12_BARRIER_LAYOUT_DEPTH_STENCIL_READ */ + G_Layout_DirectQueue_RenderTargetWrite, /* D3D12_BARRIER_LAYOUT_RENDER_TARGET */ ////////////////////////////// //- Compute queue - GPU_Layout_ComputeQueue_ShaderRead_ShaderReadWrite_CopyRead_CopyWrite, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_COMMON */ - GPU_Layout_ComputeQueue_ShaderRead_CopyRead, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_GENERIC_READ */ + G_Layout_ComputeQueue_ShaderRead_ShaderReadWrite_CopyRead_CopyWrite, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_COMMON */ + G_Layout_ComputeQueue_ShaderRead_CopyRead, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_GENERIC_READ */ - GPU_Layout_ComputeQueue_ShaderReadWrite, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_UNORDERED_ACCESS */ - GPU_Layout_ComputeQueue_ShaderRead, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_SHADER_RESOURCE */ - GPU_Layout_ComputeQueue_CopyRead, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_COPY_SOURCE */ + G_Layout_ComputeQueue_ShaderReadWrite, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_UNORDERED_ACCESS */ + G_Layout_ComputeQueue_ShaderRead, /* D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_SHADER_RESOURCE */ + G_Layout_ComputeQueue_CopyRead, /* 
D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_COPY_SOURCE */ }; /* Barrier will execute after previous stages specified by `sync_prev`, and before next stages specified by `sync_next`. @@ -278,56 +278,56 @@ Enum(GPU_Layout) * - Necessary resource flushes will occur based on `access_prev` & `access_next` * - Texture layout will transition based on `layout` (if specified) */ -Struct(GPU_BarrierDesc) +Struct(G_BarrierDesc) { - GPU_ResourceHandle resource; + G_ResourceHandle resource; b32 is_global; - GPU_Stage sync_prev; - GPU_Stage sync_next; - GPU_Access access_prev; - GPU_Access access_next; - GPU_Layout layout; + G_Stage sync_prev; + G_Stage sync_next; + G_Access access_prev; + G_Access access_next; + G_Layout layout; }; //////////////////////////////////////////////////////////// //~ Resource types -Enum(GPU_ResourceFlag) +Enum(G_ResourceFlag) { - GPU_ResourceFlag_None = 0, - GPU_ResourceFlag_AllowShaderReadWrite = (1 << 0), - GPU_ResourceFlag_AllowRenderTarget = (1 << 1), - GPU_ResourceFlag_AllowDepthStencil = (1 << 2), + G_ResourceFlag_None = 0, + G_ResourceFlag_AllowShaderReadWrite = (1 << 0), + G_ResourceFlag_AllowRenderTarget = (1 << 1), + G_ResourceFlag_AllowDepthStencil = (1 << 2), }; //////////////////////////////////////////////////////////// //~ Buffer types -Struct(GPU_BufferResourceDesc) +Struct(G_BufferResourceDesc) { u64 size; - GPU_ResourceFlag flags; + G_ResourceFlag flags; }; //////////////////////////////////////////////////////////// //~ Texture types -#define GPU_MaxRenderTargets 8 +#define G_MaxRenderTargets 8 -Enum(GPU_TextureKind) +Enum(G_TextureKind) { - GPU_TextureKind_1D, - GPU_TextureKind_2D, - GPU_TextureKind_3D, + G_TextureKind_1D, + G_TextureKind_2D, + G_TextureKind_3D, }; -Struct(GPU_TextureResourceDesc) +Struct(G_TextureResourceDesc) { - GPU_TextureKind kind; - GPU_ResourceFlag flags; - GPU_Format format; + G_TextureKind kind; + G_ResourceFlag flags; + G_Format format; Vec3I32 dims; - GPU_Layout initial_layout; + G_Layout initial_layout; i32 
mip_levels; /* Will be clamped to range [1, inf) */ Vec4 clear_color; }; @@ -336,90 +336,90 @@ Struct(GPU_TextureResourceDesc) //~ Sampler types /* NOTE: Matches DirectX D3D12_FILTER */ -Enum(GPU_Filter) +Enum(G_Filter) { /* Standard filter */ - GPU_Filter_MinMagMipPoint = 0, - GPU_Filter_MinMagPointMipLinear = 0x1, - GPU_Filter_MinPointMagLinearMipPoint = 0x4, - GPU_Filter_MinPointMagMipLinear = 0x5, - GPU_Filter_MinLinearMagMipPoint = 0x10, - GPU_Filter_MinLinearMagPointMipLinear = 0x11, - GPU_Filter_MinMagLinearMipPoint = 0x14, - GPU_Filter_MinMagMipLinear = 0x15, - GPU_Filter_MinMagAnisotropicMipPoint = 0x54, - GPU_Filter_Anisotropic = 0x55, + G_Filter_MinMagMipPoint = 0, + G_Filter_MinMagPointMipLinear = 0x1, + G_Filter_MinPointMagLinearMipPoint = 0x4, + G_Filter_MinPointMagMipLinear = 0x5, + G_Filter_MinLinearMagMipPoint = 0x10, + G_Filter_MinLinearMagPointMipLinear = 0x11, + G_Filter_MinMagLinearMipPoint = 0x14, + G_Filter_MinMagMipLinear = 0x15, + G_Filter_MinMagAnisotropicMipPoint = 0x54, + G_Filter_Anisotropic = 0x55, /* Comparison filter */ - GPU_Filter_Comparison_MinMagMipPoint = 0x80, - GPU_Filter_Comparison_MinMagPointMipLinear = 0x81, - GPU_Filter_Comparison_MinPointMagLinearMipPoint = 0x84, - GPU_Filter_Comparison_MinPointMagMipLinear = 0x85, - GPU_Filter_Comparison_MinLinearMagMipPoint = 0x90, - GPU_Filter_Comparison_MinLinearMagPointMipLinear = 0x91, - GPU_Filter_Comparison_MinMagLinearMipPoint = 0x94, - GPU_Filter_Comparison_MinMagMipLinear = 0x95, - GPU_Filter_Comparison_MinMagAnisotropicMipPoint = 0xd4, - GPU_Filter_Comparison_Anisotropic = 0xd5, + G_Filter_Comparison_MinMagMipPoint = 0x80, + G_Filter_Comparison_MinMagPointMipLinear = 0x81, + G_Filter_Comparison_MinPointMagLinearMipPoint = 0x84, + G_Filter_Comparison_MinPointMagMipLinear = 0x85, + G_Filter_Comparison_MinLinearMagMipPoint = 0x90, + G_Filter_Comparison_MinLinearMagPointMipLinear = 0x91, + G_Filter_Comparison_MinMagLinearMipPoint = 0x94, + G_Filter_Comparison_MinMagMipLinear = 
0x95, + G_Filter_Comparison_MinMagAnisotropicMipPoint = 0xd4, + G_Filter_Comparison_Anisotropic = 0xd5, /* Minimum filter */ - GPU_Filter_Minimum_MinMagMipPoint = 0x100, - GPU_Filter_Minimum_MinMagPointMipLinear = 0x101, - GPU_Filter_Minimum_MinPointMagLinearMipPoint = 0x104, - GPU_Filter_Minimum_MinPointMagMipLinear = 0x105, - GPU_Filter_Minimum_MinLinearMagMipPoint = 0x110, - GPU_Filter_Minimum_MinLinearMagPointMipLinear = 0x111, - GPU_Filter_Minimum_MinMagLinearMipPoint = 0x114, - GPU_Filter_Minimum_MinMagMipLinear = 0x115, - GPU_Filter_Minimum_MinMagAnisotropicMipPoint = 0x155, - GPU_Filter_Minimum_Anisotropic = 0x155, + G_Filter_Minimum_MinMagMipPoint = 0x100, + G_Filter_Minimum_MinMagPointMipLinear = 0x101, + G_Filter_Minimum_MinPointMagLinearMipPoint = 0x104, + G_Filter_Minimum_MinPointMagMipLinear = 0x105, + G_Filter_Minimum_MinLinearMagMipPoint = 0x110, + G_Filter_Minimum_MinLinearMagPointMipLinear = 0x111, + G_Filter_Minimum_MinMagLinearMipPoint = 0x114, + G_Filter_Minimum_MinMagMipLinear = 0x115, + G_Filter_Minimum_MinMagAnisotropicMipPoint = 0x155, + G_Filter_Minimum_Anisotropic = 0x155, /* Maximum filter */ - GPU_Filter_Maximum_MinMagMipPoint = 0x180, - GPU_Filter_Maximum_MinMagPointMipLinear = 0x181, - GPU_Filter_Maximum_MinPointMagLinearMipPoint = 0x184, - GPU_Filter_Maximum_MinPointMagMipLinear = 0x185, - GPU_Filter_Maximum_MinLinearMagMipPoint = 0x190, - GPU_Filter_Maximum_MinLinearMagPointMipLinear = 0x191, - GPU_Filter_Maximum_MinMagLinearMipPoint = 0x194, - GPU_Filter_Maximum_MinMagMipLinear = 0x195, - GPU_Filter_Maximum_MinMagAnisotropicMipPoint = 0x1d4, - GPU_Filter_Maximum_Anisotropic = 0x1d5 + G_Filter_Maximum_MinMagMipPoint = 0x180, + G_Filter_Maximum_MinMagPointMipLinear = 0x181, + G_Filter_Maximum_MinPointMagLinearMipPoint = 0x184, + G_Filter_Maximum_MinPointMagMipLinear = 0x185, + G_Filter_Maximum_MinLinearMagMipPoint = 0x190, + G_Filter_Maximum_MinLinearMagPointMipLinear = 0x191, + G_Filter_Maximum_MinMagLinearMipPoint = 0x194, + 
G_Filter_Maximum_MinMagMipLinear = 0x195, + G_Filter_Maximum_MinMagAnisotropicMipPoint = 0x1d4, + G_Filter_Maximum_Anisotropic = 0x1d5 }; /* NOTE: Matches DirectX D3D12_TEXTURE_ADDRESS_MODE */ -Enum(GPU_AddressMode) +Enum(G_AddressMode) { - GPU_AddressMode_Wrap = 1, - GPU_AddressMode_Mirror = 2, - GPU_AddressMode_Clamp = 3, /* Default */ - GPU_AddressMode_Border = 4, - GPU_AddressMode_MirrorOnce = 5 + G_AddressMode_Wrap = 1, + G_AddressMode_Mirror = 2, + G_AddressMode_Clamp = 3, /* Default */ + G_AddressMode_Border = 4, + G_AddressMode_MirrorOnce = 5 }; /* NOTE: Matches DirectX D3D12_COMPARISON_FUNC */ -Enum(GPU_ComparisonFunc) +Enum(G_ComparisonFunc) { - GPU_ComparisonFunc_None = 0, - GPU_ComparisonFunc_Never = 1, - GPU_ComparisonFunc_Less = 2, - GPU_ComparisonFunc_Equal = 3, - GPU_ComparisonFunc_LessEqual = 4, - GPU_ComparisonFunc_Greater = 5, - GPU_ComparisonFunc_NotEqual = 6, - GPU_ComparisonFunc_GreaterEqual = 7, - GPU_ComparisonFunc_Always = 8 + G_ComparisonFunc_None = 0, + G_ComparisonFunc_Never = 1, + G_ComparisonFunc_Less = 2, + G_ComparisonFunc_Equal = 3, + G_ComparisonFunc_LessEqual = 4, + G_ComparisonFunc_Greater = 5, + G_ComparisonFunc_NotEqual = 6, + G_ComparisonFunc_GreaterEqual = 7, + G_ComparisonFunc_Always = 8 }; -Struct(GPU_SamplerResourceDesc) +Struct(G_SamplerResourceDesc) { - GPU_Filter filter; - GPU_AddressMode x; - GPU_AddressMode y; - GPU_AddressMode z; + G_Filter filter; + G_AddressMode x; + G_AddressMode y; + G_AddressMode z; f32 mip_lod_bias; u32 max_anisotropy; - GPU_ComparisonFunc comparison; + G_ComparisonFunc comparison; Vec4 border_color; f32 min_lod; f32 max_lod; @@ -428,7 +428,7 @@ Struct(GPU_SamplerResourceDesc) //////////////////////////////////////////////////////////// //~ GPU pointer types -Struct(GPU_RefDesc) +Struct(G_RefDesc) { ShaderRefKind kind; u64 element_size; @@ -445,21 +445,21 @@ Struct(GPU_RefDesc) //////////////////////////////////////////////////////////// //~ Rasterization types -Enum(GPU_RasterMode) 
+Enum(G_RasterMode) { - GPU_RasterMode_None, - GPU_RasterMode_PointList, - GPU_RasterMode_LineList, - GPU_RasterMode_LineStrip, - GPU_RasterMode_TriangleList, - GPU_RasterMode_TriangleStrip, - GPU_RasterMode_WireTriangleList, - GPU_RasterMode_WireTriangleStrip, + G_RasterMode_None, + G_RasterMode_PointList, + G_RasterMode_LineList, + G_RasterMode_LineStrip, + G_RasterMode_TriangleList, + G_RasterMode_TriangleStrip, + G_RasterMode_WireTriangleList, + G_RasterMode_WireTriangleStrip, }; -Struct(GPU_IndexBufferDesc) +Struct(G_IndexBufferDesc) { - GPU_ResourceHandle resource; + G_ResourceHandle resource; u32 index_size; /* Either 2 for u16 indices, or 4 for u32 indices */ u32 index_count; }; @@ -467,26 +467,26 @@ Struct(GPU_IndexBufferDesc) //////////////////////////////////////////////////////////// //~ Synchronization types -Enum(GPU_FenceOpKind) +Enum(G_FenceOpKind) { - GPU_FenceOpKind_Set, - GPU_FenceOpKind_Add, + G_FenceOpKind_Set, + G_FenceOpKind_Add, }; -Struct(GPU_FenceOp) +Struct(G_FenceOp) { - GPU_FenceOpKind kind; + G_FenceOpKind kind; Fence *fence; i64 v; }; -#define GPU_SetFence(_fence, _v) ((GPU_FenceOp) { .kind = GPU_FenceOpKind_Set, .fence = (_fence), .v = (_v) }) -#define GPU_AddFence(_fence, _v) ((GPU_FenceOp) { .kind = GPU_FenceOpKind_Add, .fence = (_fence), .v = (_v) }) +#define G_SetFence(_fence, _v) ((G_FenceOp) { .kind = G_FenceOpKind_Set, .fence = (_fence), .v = (_v) }) +#define G_AddFence(_fence, _v) ((G_FenceOp) { .kind = G_FenceOpKind_Add, .fence = (_fence), .v = (_v) }) //////////////////////////////////////////////////////////// //~ Statistic types -Struct(GPU_Stats) +Struct(G_Stats) { /* Memory usage */ u64 local_committed; @@ -504,33 +504,33 @@ Struct(GPU_Stats) //////////////////////////////////////////////////////////// //~ @hookdecl Bootstrap -void GPU_Bootstrap(void); +void G_Bootstrap(void); //////////////////////////////////////////////////////////// //~ @hookdecl Arena -GPU_ArenaHandle GPU_AcquireArena(void); -void 
GPU_ReleaseArena(GPU_ArenaHandle arena); +G_ArenaHandle G_AcquireArena(void); +void G_ReleaseArena(G_ArenaHandle arena); //////////////////////////////////////////////////////////// //~ @hookdecl Resource //- Resource creation -GPU_ResourceHandle GPU_PushBufferResource(GPU_ArenaHandle arena, GPU_BufferResourceDesc desc); -GPU_ResourceHandle GPU_PushTextureResource(GPU_ArenaHandle arena, GPU_TextureResourceDesc desc); -GPU_ResourceHandle GPU_PushSamplerResource(GPU_ArenaHandle arena, GPU_SamplerResourceDesc desc); +G_ResourceHandle G_PushBufferResource(G_ArenaHandle arena, G_BufferResourceDesc desc); +G_ResourceHandle G_PushTextureResource(G_ArenaHandle arena, G_TextureResourceDesc desc); +G_ResourceHandle G_PushSamplerResource(G_ArenaHandle arena, G_SamplerResourceDesc desc); -#define GPU_PushBuffer(arena, type, count, ...) GPU_PushBufferResource((arena), \ - (GPU_BufferResourceDesc) { \ +#define G_PushBuffer(arena, type, count, ...) G_PushBufferResource((arena), \ + (G_BufferResourceDesc) { \ .size = sizeof(type) * (count), \ __VA_ARGS__ \ } \ ) -#define GPU_PushTexture1D(arena, _format, _size, _initial_layout, ...) GPU_PushTextureResource((arena), \ - (GPU_TextureResourceDesc) { \ - .kind = GPU_TextureKind_1D, \ +#define G_PushTexture1D(arena, _format, _size, _initial_layout, ...) G_PushTextureResource((arena), \ + (G_TextureResourceDesc) { \ + .kind = G_TextureKind_1D, \ .format = (_format), \ .dims = VEC3I32((_size), 1, 1), \ .initial_layout = (_initial_layout), \ @@ -538,9 +538,9 @@ GPU_ResourceHandle GPU_PushSamplerResource(GPU_ArenaHandle arena, GPU_SamplerRes } \ ) -#define GPU_PushTexture2D(arena, _format, _size, _initial_layout, ...) GPU_PushTextureResource((arena), \ - (GPU_TextureResourceDesc) { \ - .kind = GPU_TextureKind_2D, \ +#define G_PushTexture2D(arena, _format, _size, _initial_layout, ...) 
G_PushTextureResource((arena), \ + (G_TextureResourceDesc) { \ + .kind = G_TextureKind_2D, \ .format = (_format), \ .dims = VEC3I32((_size).x, (_size).y, 1), \ .initial_layout = (_initial_layout), \ @@ -548,9 +548,9 @@ GPU_ResourceHandle GPU_PushSamplerResource(GPU_ArenaHandle arena, GPU_SamplerRes } \ ) -#define GPU_PushTexture3D(arena, _format, _size, _initial_layout, ...) GPU_PushTextureResource((arena), \ - (GPU_TextureResourceDesc) { \ - .kind = GPU_TextureKind_3D, \ +#define G_PushTexture3D(arena, _format, _size, _initial_layout, ...) G_PushTextureResource((arena), \ + (G_TextureResourceDesc) { \ + .kind = G_TextureKind_3D, \ .format = (_format), \ .dims = (_size), \ .initial_layout = (_initial_layout), \ @@ -560,79 +560,79 @@ GPU_ResourceHandle GPU_PushSamplerResource(GPU_ArenaHandle arena, GPU_SamplerRes //- Index buffer helpers -#define GPU_IdxBuff16(_res) ((GPU_IndexBufferDesc) { .resource = (_res), .index_size = 2, .index_count = (GPU_CountBuffer((_res), i16)) }) -#define GPU_IdxBuff32(_res) ((GPU_IndexBufferDesc) { .resource = (_res), .index_size = 4, .index_count = (GPU_CountBuffer((_res), i32)) }) +#define G_IdxBuff16(_res) ((G_IndexBufferDesc) { .resource = (_res), .index_size = 2, .index_count = (G_CountBuffer((_res), i16)) }) +#define G_IdxBuff32(_res) ((G_IndexBufferDesc) { .resource = (_res), .index_size = 4, .index_count = (G_CountBuffer((_res), i32)) }) //- Count -u64 GPU_CountBufferBytes(GPU_ResourceHandle buffer); -i32 GPU_Count1D(GPU_ResourceHandle texture); -Vec2I32 GPU_Count2D(GPU_ResourceHandle texture); -Vec3I32 GPU_Count3D(GPU_ResourceHandle texture); -i32 GPU_CountWidth(GPU_ResourceHandle texture); -i32 GPU_CountHeight(GPU_ResourceHandle texture); -i32 GPU_CountDepth(GPU_ResourceHandle texture); +u64 G_CountBufferBytes(G_ResourceHandle buffer); +i32 G_Count1D(G_ResourceHandle texture); +Vec2I32 G_Count2D(G_ResourceHandle texture); +Vec3I32 G_Count3D(G_ResourceHandle texture); +i32 G_CountWidth(G_ResourceHandle texture); +i32 
G_CountHeight(G_ResourceHandle texture); +i32 G_CountDepth(G_ResourceHandle texture); -#define GPU_CountBuffer(buffer, type) GPU_CountBufferBytes(buffer) / sizeof(type) +#define G_CountBuffer(buffer, type) G_CountBufferBytes(buffer) / sizeof(type) //////////////////////////////////////////////////////////// //~ @hookdecl Shader resource references -u32 GPU_PushRef(GPU_ArenaHandle arena, GPU_ResourceHandle resource, GPU_RefDesc desc); +u32 G_PushRef(G_ArenaHandle arena, G_ResourceHandle resource, G_RefDesc desc); -#define GPU_PushStructuredBufferRef(arena, resource, type, ...) (StructuredBufferRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_StructuredBuffer, .element_size = sizeof(type), __VA_ARGS__ }) \ +#define G_PushStructuredBufferRef(arena, resource, type, ...) (StructuredBufferRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_StructuredBuffer, .element_size = sizeof(type), __VA_ARGS__ }) \ } -#define GPU_PushRWStructuredBufferRef(arena, resource, type, ...) (RWStructuredBufferRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_RWStructuredBuffer, .element_size = sizeof(type), __VA_ARGS__ }) \ +#define G_PushRWStructuredBufferRef(arena, resource, type, ...) (RWStructuredBufferRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_RWStructuredBuffer, .element_size = sizeof(type), __VA_ARGS__ }) \ } -#define GPU_PushByteAddressBufferRef(arena, resource, ...) (ByteAddressBufferRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_ByteAddressBuffer, __VA_ARGS__ }) \ +#define G_PushByteAddressBufferRef(arena, resource, ...) (ByteAddressBufferRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_ByteAddressBuffer, __VA_ARGS__ }) \ } -#define GPU_PushRWByteAddressBufferRef(arena, resource, ...) 
(RWByteAddressBufferRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_RWByteAddressBuffer, __VA_ARGS__ }) \ +#define G_PushRWByteAddressBufferRef(arena, resource, ...) (RWByteAddressBufferRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_RWByteAddressBuffer, __VA_ARGS__ }) \ } -#define GPU_PushTexture1DRef(arena, resource, ...) (Texture1DRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_Texture1D, __VA_ARGS__ }) \ +#define G_PushTexture1DRef(arena, resource, ...) (Texture1DRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_Texture1D, __VA_ARGS__ }) \ } -#define GPU_PushRWTexture1DRef(arena, resource, ...) (RWTexture1DRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_RWTexture1D, __VA_ARGS__ }) \ +#define G_PushRWTexture1DRef(arena, resource, ...) (RWTexture1DRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_RWTexture1D, __VA_ARGS__ }) \ } -#define GPU_PushTexture2DRef(arena, resource, ...) (Texture2DRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_Texture2D, __VA_ARGS__ }) \ +#define G_PushTexture2DRef(arena, resource, ...) (Texture2DRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_Texture2D, __VA_ARGS__ }) \ } -#define GPU_PushRWTexture2DRef(arena, resource, ...) (RWTexture2DRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_RWTexture2D, __VA_ARGS__ }) \ +#define G_PushRWTexture2DRef(arena, resource, ...) (RWTexture2DRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_RWTexture2D, __VA_ARGS__ }) \ } -#define GPU_PushTexture3DRef(arena, resource, ...) 
(Texture3DRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_Texture3D, __VA_ARGS__ }) \ +#define G_PushTexture3DRef(arena, resource, ...) (Texture3DRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_Texture3D, __VA_ARGS__ }) \ } -#define GPU_PushRWTexture3DRef(arena, resource, ...) (RWTexture3DRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_RWTexture3D, __VA_ARGS__ }) \ +#define G_PushRWTexture3DRef(arena, resource, ...) (RWTexture3DRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_RWTexture3D, __VA_ARGS__ }) \ } -#define GPU_PushSamplerStateRef(arena, resource, ...) (SamplerStateRef) { \ - .v = GPU_PushRef((arena), (resource), \ - (GPU_RefDesc) { .kind = ShaderRefKind_SamplerState, __VA_ARGS__ }) \ +#define G_PushSamplerStateRef(arena, resource, ...) (SamplerStateRef) { \ + .v = G_PushRef((arena), (resource), \ + (G_RefDesc) { .kind = ShaderRefKind_SamplerState, __VA_ARGS__ }) \ } //////////////////////////////////////////////////////////// @@ -640,43 +640,43 @@ u32 GPU_PushRef(GPU_ArenaHandle arena, GPU_ResourceHandle resource, GPU_RefDesc //- Command list -GPU_CommandListHandle GPU_PrepareCommandList(GPU_QueueKind queue); -void GPU_CommitCommandListEx(GPU_CommandListHandle cl, u64 fence_ops_count, GPU_FenceOp *fence_ops); +G_CommandListHandle G_PrepareCommandList(G_QueueKind queue); +void G_CommitCommandListEx(G_CommandListHandle cl, u64 fence_ops_count, G_FenceOp *fence_ops); -#define GPU_CommitCommandList(cl) GPU_CommitCommandListEx((cl), 0, 0) +#define G_CommitCommandList(cl) G_CommitCommandListEx((cl), 0, 0) //- Arena -void GPU_ResetArena(GPU_CommandListHandle cl, GPU_ArenaHandle arena); +void G_ResetArena(G_CommandListHandle cl, G_ArenaHandle arena); //- Cpu -> Gpu copy -void GPU_CopyCpuToBuffer(GPU_CommandListHandle cl, GPU_ResourceHandle dst, u64 dst_offset, void *src, RngU64 src_copy_range); -void 
GPU_CopyCpuToTexture(GPU_CommandListHandle cl, GPU_ResourceHandle dst, Vec3I32 dst_offset, void *src, Vec3I32 src_dims, Rng3I32 src_copy_range); +void G_CopyCpuToBuffer(G_CommandListHandle cl, G_ResourceHandle dst, u64 dst_offset, void *src, RngU64 src_copy_range); +void G_CopyCpuToTexture(G_CommandListHandle cl, G_ResourceHandle dst, Vec3I32 dst_offset, void *src, Vec3I32 src_dims, Rng3I32 src_copy_range); //- Gpu <-> Gpu copy -void GPU_CopyBufferToBuffer(GPU_CommandListHandle cl, GPU_ResourceHandle dst, u64 dst_offset, GPU_ResourceHandle src, RngU64 src_copy_range); -void GPU_CopyBufferToTexture(GPU_CommandListHandle cl, GPU_ResourceHandle dst, Vec3I32 dst_offset, GPU_ResourceHandle src, Vec3I32 src_dims, Rng3I32 src_copy_range); -void GPU_CopyTextureToTexture(GPU_CommandListHandle cl, GPU_ResourceHandle dst, Vec3I32 dst_offset, GPU_ResourceHandle src, Rng3I32 src_copy_range); -void GPU_CopyTextureToBuffer(GPU_CommandListHandle cl, GPU_ResourceHandle dst, Vec3I32 dst_offset, GPU_ResourceHandle src, Rng3I32 src_copy_range); +void G_CopyBufferToBuffer(G_CommandListHandle cl, G_ResourceHandle dst, u64 dst_offset, G_ResourceHandle src, RngU64 src_copy_range); +void G_CopyBufferToTexture(G_CommandListHandle cl, G_ResourceHandle dst, Vec3I32 dst_offset, G_ResourceHandle src, Vec3I32 src_dims, Rng3I32 src_copy_range); +void G_CopyTextureToTexture(G_CommandListHandle cl, G_ResourceHandle dst, Vec3I32 dst_offset, G_ResourceHandle src, Rng3I32 src_copy_range); +void G_CopyTextureToBuffer(G_CommandListHandle cl, G_ResourceHandle dst, Vec3I32 dst_offset, G_ResourceHandle src, Rng3I32 src_copy_range); //- Constant -void GPU_SetConstant_(GPU_CommandListHandle cl, i32 slot, void *src_32bit, u32 size); +void G_SetConstant_(G_CommandListHandle cl, i32 slot, void *src_32bit, u32 size); -#define GPU_SetConstant(cl, name, value) do { \ +#define G_SetConstant(cl, name, value) do { \ name##__shaderconstanttype __src; \ __src.v = value; \ - GPU_SetConstant_((cl), (name), &__src, 
sizeof(__src)); \ + G_SetConstant_((cl), (name), &__src, sizeof(__src)); \ } while (0) //- Barrier -void GPU_Sync(GPU_CommandListHandle cl, GPU_BarrierDesc desc); +void G_Sync(G_CommandListHandle cl, G_BarrierDesc desc); -#define GPU_MemorySync(_cl, _resource, _sync_prev, _access_prev, _sync_next, _access_next) \ - GPU_Sync((_cl), (GPU_BarrierDesc) { \ +#define G_MemorySync(_cl, _resource, _sync_prev, _access_prev, _sync_next, _access_next) \ + G_Sync((_cl), (G_BarrierDesc) { \ .resource = (_resource), \ .sync_prev = _sync_prev, \ .access_prev = _access_prev, \ @@ -684,8 +684,8 @@ void GPU_Sync(GPU_CommandListHandle cl, GPU_BarrierDesc desc); .access_next = _access_next, \ }) -#define GPU_MemoryLayoutSync(_cl, _resource, _sync_prev, _access_prev, _sync_next, _access_next, _layout) \ - GPU_Sync((_cl), (GPU_BarrierDesc) { \ +#define G_MemoryLayoutSync(_cl, _resource, _sync_prev, _access_prev, _sync_next, _access_next, _layout) \ + G_Sync((_cl), (G_BarrierDesc) { \ .resource = (_resource), \ .sync_prev = _sync_prev, \ .access_prev = _access_prev, \ @@ -694,8 +694,8 @@ void GPU_Sync(GPU_CommandListHandle cl, GPU_BarrierDesc desc); .layout = _layout, \ }) -#define GPU_GlobalMemorySync(_cl, _sync_prev, _access_prev, _sync_next, _access_next) \ - GPU_Sync((_cl), (GPU_BarrierDesc) { \ +#define G_GlobalMemorySync(_cl, _sync_prev, _access_prev, _sync_next, _access_next) \ + G_Sync((_cl), (G_BarrierDesc) { \ .is_global = 1, \ .sync_prev = _sync_prev, \ .access_prev = _access_prev, \ @@ -703,54 +703,54 @@ void GPU_Sync(GPU_CommandListHandle cl, GPU_BarrierDesc desc); .access_next = _access_next, \ }) -#define GPU_DumbMemorySync(cl, resource) \ - GPU_MemorySync((cl), (resource), GPU_Stage_All, GPU_Access_All, GPU_Stage_All, GPU_Access_All) +#define G_DumbMemorySync(cl, resource) \ + G_MemorySync((cl), (resource), G_Stage_All, G_Access_All, G_Stage_All, G_Access_All) -#define GPU_DumbMemoryLayoutSync(cl, resource, layout) \ - GPU_MemoryLayoutSync((cl), (resource), GPU_Stage_All, 
GPU_Access_All, GPU_Stage_All, GPU_Access_All, (layout)) +#define G_DumbMemoryLayoutSync(cl, resource, layout) \ + G_MemoryLayoutSync((cl), (resource), G_Stage_All, G_Access_All, G_Stage_All, G_Access_All, (layout)) -#define GPU_DumbGlobalMemorySync(cl) \ - GPU_GlobalMemorySync((cl), GPU_Stage_All, GPU_Access_All, GPU_Stage_All, GPU_Access_All) +#define G_DumbGlobalMemorySync(cl) \ + G_GlobalMemorySync((cl), G_Stage_All, G_Access_All, G_Stage_All, G_Access_All) //- Compute -void GPU_Compute(GPU_CommandListHandle cl, ComputeShader cs, Vec3I32 groups); +void G_Compute(G_CommandListHandle cl, ComputeShader cs, Vec3I32 groups); //- Rasterize -void GPU_Rasterize(GPU_CommandListHandle cl, +void G_Rasterize(G_CommandListHandle cl, VertexShader vs, PixelShader ps, - u32 instances_count, GPU_IndexBufferDesc index_buffer, - u32 render_targets_count, GPU_ResourceHandle *render_targets, + u32 instances_count, G_IndexBufferDesc index_buffer, + u32 render_targets_count, G_ResourceHandle *render_targets, Rng3 viewport, Rng2 scissor, - GPU_RasterMode mode); + G_RasterMode mode); //- Clear -void GPU_ClearRenderTarget(GPU_CommandListHandle cl, GPU_ResourceHandle render_target, Vec4 color); +void G_ClearRenderTarget(G_CommandListHandle cl, G_ResourceHandle render_target, Vec4 color); //////////////////////////////////////////////////////////// //~ @hookdecl Queue synchronization /* `waiter_queue` will block until `completion_queue` completes all submitted commands */ -void GPU_SyncQueue(GPU_QueueKind completion_queue, GPU_QueueKind waiter_queue); +void G_SyncQueue(G_QueueKind completion_queue, G_QueueKind waiter_queue); /* All queues will block until `completion_queue` completes all submitted commands */ -void GPU_SyncOtherQueues(GPU_QueueKind completion_queue); +void G_SyncOtherQueues(G_QueueKind completion_queue); //////////////////////////////////////////////////////////// //~ @hookdecl Statistics -GPU_Stats GPU_QueryStats(void); +G_Stats G_QueryStats(void); 
//////////////////////////////////////////////////////////// //~ @hookdecl Swapchain -GPU_SwapchainHandle GPU_AcquireSwapchain(u64 os_window_handle); -void GPU_ReleaseSwapchain(GPU_SwapchainHandle swapchain); +G_SwapchainHandle G_AcquireSwapchain(u64 os_window_handle); +void G_ReleaseSwapchain(G_SwapchainHandle swapchain); /* Waits until a new backbuffer is ready from the swapchain. * This should be called before rendering for minimum latency. */ -GPU_ResourceHandle GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, GPU_Format format, Vec2I32 size); +G_ResourceHandle G_PrepareBackbuffer(G_SwapchainHandle swapchain_handle, G_Format format, Vec2I32 size); -void GPU_CommitBackbuffer(GPU_ResourceHandle backbuffer, i32 vsync); +void G_CommitBackbuffer(G_ResourceHandle backbuffer, i32 vsync); diff --git a/src/gpu/gpu_dx12/gpu_dx12_core.c b/src/gpu/gpu_dx12/gpu_dx12_core.c index d0c0680a..b3100d46 100644 --- a/src/gpu/gpu_dx12/gpu_dx12_core.c +++ b/src/gpu/gpu_dx12/gpu_dx12_core.c @@ -1,11 +1,11 @@ -GPU_D12_SharedState GPU_D12_shared_state = ZI; +G_D12_SharedState G_D12_shared_state = ZI; //////////////////////////////////////////////////////////// //~ @hookimpl Bootstrap -void GPU_Bootstrap(void) +void G_Bootstrap(void) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; + G_D12_SharedState *g = &G_D12_shared_state; TempArena scratch = BeginScratchNoConflict(); Arena *perm = PermArena(); @@ -150,16 +150,16 @@ void GPU_Bootstrap(void) //- Initialize command queues { - GPU_D12_CommandQueueDesc descs[] = { + G_D12_CommandQueueDesc descs[] = { { .type = D3D12_COMMAND_LIST_TYPE_DIRECT, .priority = D3D12_COMMAND_QUEUE_PRIORITY_HIGH }, { .type = D3D12_COMMAND_LIST_TYPE_COMPUTE, .priority = D3D12_COMMAND_QUEUE_PRIORITY_NORMAL }, { .type = D3D12_COMMAND_LIST_TYPE_COPY, .priority = D3D12_COMMAND_QUEUE_PRIORITY_NORMAL }, }; for (u32 i = 0; i < MinU32(countof(descs), countof(g->queues)); ++i) { - GPU_D12_CommandQueueDesc desc = descs[i]; + G_D12_CommandQueueDesc desc = 
descs[i]; D3D12_COMMAND_QUEUE_DESC d3d_desc = { .Type = desc.type, .Priority = desc.priority }; - GPU_D12_Queue *queue = &g->queues[i]; + G_D12_Queue *queue = &g->queues[i]; queue->desc = desc; HRESULT hr = ID3D12Device_CreateCommandQueue(g->device, &d3d_desc, &IID_ID3D12CommandQueue, (void **)&queue->d3d_queue); if (SUCCEEDED(hr)) @@ -178,27 +178,27 @@ void GPU_Bootstrap(void) { Struct(Dx12HeapDesc) { D3D12_DESCRIPTOR_HEAP_TYPE type; D3D12_DESCRIPTOR_HEAP_FLAGS flags; u64 max; }; - Dx12HeapDesc descs[GPU_D12_DescriptorHeapKind_Count] = { - [GPU_D12_DescriptorHeapKind_CbvSrvUav] = { + Dx12HeapDesc descs[G_D12_DescriptorHeapKind_Count] = { + [G_D12_DescriptorHeapKind_CbvSrvUav] = { .type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, .flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE, - .max = GPU_D12_MaxCbvSrvUavDescriptors, + .max = G_D12_MaxCbvSrvUavDescriptors, }, - [GPU_D12_DescriptorHeapKind_Rtv] = { + [G_D12_DescriptorHeapKind_Rtv] = { .type = D3D12_DESCRIPTOR_HEAP_TYPE_RTV, .flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE, - .max = GPU_D12_MaxRtvDescriptors, + .max = G_D12_MaxRtvDescriptors, }, - [GPU_D12_DescriptorHeapKind_Sampler] = { + [G_D12_DescriptorHeapKind_Sampler] = { .type = D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, .flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE, - .max = GPU_D12_MaxSamplerDescriptors, + .max = G_D12_MaxSamplerDescriptors, }, }; - for (GPU_D12_DescriptorHeapKind kind = 0; kind < countof(descs); ++kind) + for (G_D12_DescriptorHeapKind kind = 0; kind < countof(descs); ++kind) { Dx12HeapDesc desc = descs[kind]; - GPU_D12_DescriptorHeap *heap = &g->descriptor_heaps[kind]; + G_D12_DescriptorHeap *heap = &g->descriptor_heaps[kind]; heap->descriptors_arena = AcquireArena(Gibi(1)); heap->type = desc.type; @@ -225,8 +225,8 @@ void GPU_Bootstrap(void) if (SUCCEEDED(hr)) { /* Push an empty descriptor at index 0, so that a handle with a value of 0 always represents nil */ - GPU_D12_Arena *gpu_perm = GPU_D12_ArenaFromHandle(GPU_PermArena()); - 
GPU_D12_Descriptor *nil_descriptor = GPU_D12_PushDescriptor(gpu_perm, kind, 0); + G_D12_Arena *gpu_perm = G_D12_ArenaFromHandle(G_PermArena()); + G_D12_Descriptor *nil_descriptor = G_D12_PushDescriptor(gpu_perm, kind, 0); Assert(nil_descriptor->index == 0); } @@ -247,8 +247,8 @@ void GPU_Bootstrap(void) ID3D10Blob *blob = 0; if (SUCCEEDED(hr)) { - D3D12_ROOT_PARAMETER params[GPU_D12_NumShaderConstants] = ZI; - for (i32 slot = 0; slot < GPU_D12_NumShaderConstants; ++slot) + D3D12_ROOT_PARAMETER params[G_D12_NumShaderConstants] = ZI; + for (i32 slot = 0; slot < G_D12_NumShaderConstants; ++slot) { D3D12_ROOT_PARAMETER *param = ¶ms[slot]; param->ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS; @@ -289,13 +289,13 @@ void GPU_Bootstrap(void) ////////////////////////////// //- Initialize workers - for (GPU_QueueKind kind = 0; kind < GPU_NumQueues; ++kind) + for (G_QueueKind kind = 0; kind < G_NumQueues; ++kind) { String name = ZI; - if (kind == GPU_QueueKind_Direct) name = Lit("Direct queue worker"); - if (kind == GPU_QueueKind_AsyncCompute) name = Lit("Compute queue worker"); - if (kind == GPU_QueueKind_AsyncCopy) name = Lit("Copy queue worker"); - DispatchWave(name, 1, GPU_D12_WorkerEntry, (void *)(u64)kind); + if (kind == G_QueueKind_Direct) name = Lit("Direct queue worker"); + if (kind == G_QueueKind_AsyncCompute) name = Lit("Compute queue worker"); + if (kind == G_QueueKind_AsyncCopy) name = Lit("Copy queue worker"); + DispatchWave(name, 1, G_D12_WorkerEntry, (void *)(u64)kind); } EndScratch(scratch); @@ -304,101 +304,101 @@ void GPU_Bootstrap(void) //////////////////////////////////////////////////////////// //~ Helpers -GPU_D12_Arena *GPU_D12_ArenaFromHandle(GPU_ArenaHandle handle) +G_D12_Arena *G_D12_ArenaFromHandle(G_ArenaHandle handle) { - return (GPU_D12_Arena *)handle.v; + return (G_D12_Arena *)handle.v; } -GPU_D12_CmdList *GPU_D12_CmdListFromHandle(GPU_CommandListHandle handle) +G_D12_CmdList *G_D12_CmdListFromHandle(G_CommandListHandle handle) { - 
return (GPU_D12_CmdList *)handle.v; + return (G_D12_CmdList *)handle.v; } -GPU_D12_Resource *GPU_D12_ResourceFromHandle(GPU_ResourceHandle handle) +G_D12_Resource *G_D12_ResourceFromHandle(G_ResourceHandle handle) { - return (GPU_D12_Resource *)handle.v; + return (G_D12_Resource *)handle.v; } -GPU_D12_Swapchain *GPU_D12_SwapchainFromHandle(GPU_SwapchainHandle handle) +G_D12_Swapchain *G_D12_SwapchainFromHandle(G_SwapchainHandle handle) { - return (GPU_D12_Swapchain *)handle.v; + return (G_D12_Swapchain *)handle.v; } -DXGI_FORMAT GPU_D12_DxgiFormatFromGpuFormat(GPU_Format format) +DXGI_FORMAT G_D12_DxgiFormatFromGpuFormat(G_Format format) { return (DXGI_FORMAT)format; } -D3D12_BARRIER_SYNC GPU_D12_BarrierSyncFromStages(GPU_Stage stages) +D3D12_BARRIER_SYNC G_D12_BarrierSyncFromStages(G_Stage stages) { D3D12_BARRIER_SYNC result = 0; - if (stages == GPU_Stage_All) + if (stages == G_Stage_All) { result = D3D12_BARRIER_SYNC_ALL; } else { - result |= D3D12_BARRIER_SYNC_COMPUTE_SHADING * AnyBit(stages, GPU_Stage_ComputeShading); - result |= D3D12_BARRIER_SYNC_INDEX_INPUT * AnyBit(stages, GPU_Stage_IndexAssembly); - result |= D3D12_BARRIER_SYNC_VERTEX_SHADING * AnyBit(stages, GPU_Stage_VertexShading); - result |= D3D12_BARRIER_SYNC_PIXEL_SHADING * AnyBit(stages, GPU_Stage_PixelShading); - result |= D3D12_BARRIER_SYNC_DEPTH_STENCIL * AnyBit(stages, GPU_Stage_DepthStencil); - result |= D3D12_BARRIER_SYNC_RENDER_TARGET * AnyBit(stages, GPU_Stage_RenderTarget); - result |= D3D12_BARRIER_SYNC_COPY * AnyBit(stages, GPU_Stage_Copy); - result |= D3D12_BARRIER_SYNC_EXECUTE_INDIRECT * AnyBit(stages, GPU_Stage_Indirect); + result |= D3D12_BARRIER_SYNC_COMPUTE_SHADING * AnyBit(stages, G_Stage_ComputeShading); + result |= D3D12_BARRIER_SYNC_INDEX_INPUT * AnyBit(stages, G_Stage_IndexAssembly); + result |= D3D12_BARRIER_SYNC_VERTEX_SHADING * AnyBit(stages, G_Stage_VertexShading); + result |= D3D12_BARRIER_SYNC_PIXEL_SHADING * AnyBit(stages, G_Stage_PixelShading); + result |= 
D3D12_BARRIER_SYNC_DEPTH_STENCIL * AnyBit(stages, G_Stage_DepthStencil); + result |= D3D12_BARRIER_SYNC_RENDER_TARGET * AnyBit(stages, G_Stage_RenderTarget); + result |= D3D12_BARRIER_SYNC_COPY * AnyBit(stages, G_Stage_Copy); + result |= D3D12_BARRIER_SYNC_EXECUTE_INDIRECT * AnyBit(stages, G_Stage_Indirect); } return result; } -D3D12_BARRIER_ACCESS GPU_D12_BarrierAccessFromAccesses(GPU_Access accesses) +D3D12_BARRIER_ACCESS G_D12_BarrierAccessFromAccesses(G_Access accesses) { D3D12_BARRIER_ACCESS result = 0; if (accesses == 0) { result = D3D12_BARRIER_ACCESS_NO_ACCESS; } - else if (accesses == GPU_Access_All) + else if (accesses == G_Access_All) { result = D3D12_BARRIER_ACCESS_COMMON; } else { - result |= D3D12_BARRIER_ACCESS_UNORDERED_ACCESS * AnyBit(accesses, GPU_Access_ShaderReadWrite); - result |= D3D12_BARRIER_ACCESS_SHADER_RESOURCE * AnyBit(accesses, GPU_Access_ShaderRead); - result |= D3D12_BARRIER_ACCESS_COPY_DEST * AnyBit(accesses, GPU_Access_CopyWrite); - result |= D3D12_BARRIER_ACCESS_COPY_SOURCE * AnyBit(accesses, GPU_Access_CopyRead); - result |= D3D12_BARRIER_ACCESS_INDEX_BUFFER * AnyBit(accesses, GPU_Access_IndexBuffer); - result |= D3D12_BARRIER_ACCESS_INDIRECT_ARGUMENT * AnyBit(accesses, GPU_Access_IndirectArgument); - result |= D3D12_BARRIER_ACCESS_DEPTH_STENCIL_READ * AnyBit(accesses, GPU_Access_DepthStencilRead); - result |= D3D12_BARRIER_ACCESS_DEPTH_STENCIL_WRITE * AnyBit(accesses, GPU_Access_DepthStencilWrite); - result |= D3D12_BARRIER_ACCESS_RENDER_TARGET * AnyBit(accesses, GPU_Access_RenderTargetWrite); + result |= D3D12_BARRIER_ACCESS_UNORDERED_ACCESS * AnyBit(accesses, G_Access_ShaderReadWrite); + result |= D3D12_BARRIER_ACCESS_SHADER_RESOURCE * AnyBit(accesses, G_Access_ShaderRead); + result |= D3D12_BARRIER_ACCESS_COPY_DEST * AnyBit(accesses, G_Access_CopyWrite); + result |= D3D12_BARRIER_ACCESS_COPY_SOURCE * AnyBit(accesses, G_Access_CopyRead); + result |= D3D12_BARRIER_ACCESS_INDEX_BUFFER * AnyBit(accesses, G_Access_IndexBuffer); + 
result |= D3D12_BARRIER_ACCESS_INDIRECT_ARGUMENT * AnyBit(accesses, G_Access_IndirectArgument); + result |= D3D12_BARRIER_ACCESS_DEPTH_STENCIL_READ * AnyBit(accesses, G_Access_DepthStencilRead); + result |= D3D12_BARRIER_ACCESS_DEPTH_STENCIL_WRITE * AnyBit(accesses, G_Access_DepthStencilWrite); + result |= D3D12_BARRIER_ACCESS_RENDER_TARGET * AnyBit(accesses, G_Access_RenderTargetWrite); } return result; } -D3D12_BARRIER_LAYOUT GPU_D12_BarrierLayoutFromLayout(GPU_Layout layout) +D3D12_BARRIER_LAYOUT G_D12_BarrierLayoutFromLayout(G_Layout layout) { PERSIST Readonly D3D12_BARRIER_LAYOUT translate[] = { - [GPU_Layout_Undefined] = D3D12_BARRIER_LAYOUT_UNDEFINED, - [GPU_Layout_Simultaneous] = D3D12_BARRIER_LAYOUT_COMMON, - [GPU_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present] = D3D12_BARRIER_LAYOUT_COMMON, - [GPU_Layout_DirectComputeQueue_ShaderReadWrite] = D3D12_BARRIER_LAYOUT_UNORDERED_ACCESS, - [GPU_Layout_DirectComputeQueue_ShaderRead_CopyRead] = D3D12_BARRIER_LAYOUT_GENERIC_READ, - [GPU_Layout_DirectComputeQueue_ShaderRead] = D3D12_BARRIER_LAYOUT_SHADER_RESOURCE, - [GPU_Layout_DirectComputeQueue_CopyRead] = D3D12_BARRIER_LAYOUT_COPY_SOURCE, - [GPU_Layout_DirectQueue_ShaderRead_ShaderReadWrite_CopyRead_CopyWrite] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_COMMON, - [GPU_Layout_DirectQueue_ShaderReadWrite] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_UNORDERED_ACCESS, - [GPU_Layout_DirectQueue_ShaderRead_CopyRead_DepthStencilRead] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_GENERIC_READ, - [GPU_Layout_DirectQueue_ShaderRead] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_SHADER_RESOURCE, - [GPU_Layout_DirectQueue_CopyRead] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_COPY_SOURCE, - [GPU_Layout_DirectQueue_DepthStencilRead_DepthStencilWrite] = D3D12_BARRIER_LAYOUT_DEPTH_STENCIL_WRITE, - [GPU_Layout_DirectQueue_DepthStencilRead] = D3D12_BARRIER_LAYOUT_DEPTH_STENCIL_READ, - [GPU_Layout_DirectQueue_RenderTargetWrite] = D3D12_BARRIER_LAYOUT_RENDER_TARGET, - 
[GPU_Layout_ComputeQueue_ShaderRead_ShaderReadWrite_CopyRead_CopyWrite] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_COMMON, - [GPU_Layout_ComputeQueue_ShaderReadWrite] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_UNORDERED_ACCESS, - [GPU_Layout_ComputeQueue_ShaderRead_CopyRead] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_GENERIC_READ, - [GPU_Layout_ComputeQueue_ShaderRead] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_SHADER_RESOURCE, - [GPU_Layout_ComputeQueue_CopyRead] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_COPY_SOURCE, + [G_Layout_Undefined] = D3D12_BARRIER_LAYOUT_UNDEFINED, + [G_Layout_Simultaneous] = D3D12_BARRIER_LAYOUT_COMMON, + [G_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present] = D3D12_BARRIER_LAYOUT_COMMON, + [G_Layout_DirectComputeQueue_ShaderReadWrite] = D3D12_BARRIER_LAYOUT_UNORDERED_ACCESS, + [G_Layout_DirectComputeQueue_ShaderRead_CopyRead] = D3D12_BARRIER_LAYOUT_GENERIC_READ, + [G_Layout_DirectComputeQueue_ShaderRead] = D3D12_BARRIER_LAYOUT_SHADER_RESOURCE, + [G_Layout_DirectComputeQueue_CopyRead] = D3D12_BARRIER_LAYOUT_COPY_SOURCE, + [G_Layout_DirectQueue_ShaderRead_ShaderReadWrite_CopyRead_CopyWrite] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_COMMON, + [G_Layout_DirectQueue_ShaderReadWrite] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_UNORDERED_ACCESS, + [G_Layout_DirectQueue_ShaderRead_CopyRead_DepthStencilRead] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_GENERIC_READ, + [G_Layout_DirectQueue_ShaderRead] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_SHADER_RESOURCE, + [G_Layout_DirectQueue_CopyRead] = D3D12_BARRIER_LAYOUT_DIRECT_QUEUE_COPY_SOURCE, + [G_Layout_DirectQueue_DepthStencilRead_DepthStencilWrite] = D3D12_BARRIER_LAYOUT_DEPTH_STENCIL_WRITE, + [G_Layout_DirectQueue_DepthStencilRead] = D3D12_BARRIER_LAYOUT_DEPTH_STENCIL_READ, + [G_Layout_DirectQueue_RenderTargetWrite] = D3D12_BARRIER_LAYOUT_RENDER_TARGET, + [G_Layout_ComputeQueue_ShaderRead_ShaderReadWrite_CopyRead_CopyWrite] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_COMMON, + [G_Layout_ComputeQueue_ShaderReadWrite] = 
D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_UNORDERED_ACCESS, + [G_Layout_ComputeQueue_ShaderRead_CopyRead] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_GENERIC_READ, + [G_Layout_ComputeQueue_ShaderRead] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_SHADER_RESOURCE, + [G_Layout_ComputeQueue_CopyRead] = D3D12_BARRIER_LAYOUT_COMPUTE_QUEUE_COPY_SOURCE, }; return translate[layout]; }; @@ -406,15 +406,15 @@ D3D12_BARRIER_LAYOUT GPU_D12_BarrierLayoutFromLayout(GPU_Layout layout) //////////////////////////////////////////////////////////// //~ Pipeline -GPU_D12_Pipeline *GPU_D12_PipelineFromDesc(GPU_D12_PipelineDesc desc) +G_D12_Pipeline *G_D12_PipelineFromDesc(G_D12_PipelineDesc desc) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; + G_D12_SharedState *g = &G_D12_shared_state; u64 hash = RandU64FromSeed(HashFnv64(Fnv64Basis, StringFromStruct(&desc))); /* Fetch pipeline from cache */ - GPU_D12_Pipeline *pipeline = 0; + G_D12_Pipeline *pipeline = 0; b32 is_pipeline_new = 0; - GPU_D12_PipelineBin *bin = &g->pipeline_bins[hash % countof(g->pipeline_bins)]; + G_D12_PipelineBin *bin = &g->pipeline_bins[hash % countof(g->pipeline_bins)]; { { Lock lock = LockS(&bin->mutex); @@ -435,7 +435,7 @@ GPU_D12_Pipeline *GPU_D12_PipelineFromDesc(GPU_D12_PipelineDesc desc) { Arena *perm = PermArena(); PushAlign(perm, CachelineSize); - pipeline = PushStruct(perm, GPU_D12_Pipeline); + pipeline = PushStruct(perm, G_D12_Pipeline); pipeline->desc = desc; pipeline->hash = hash; is_pipeline_new = 1; @@ -512,7 +512,7 @@ GPU_D12_Pipeline *GPU_D12_PipelineFromDesc(GPU_D12_PipelineDesc desc) for (i32 i = 0; i < (i32)countof(desc.render_target_formats); ++i) { StaticAssert(countof(pso_desc.RTVFormats) <= countof(desc.render_target_formats)); - DXGI_FORMAT format = GPU_D12_DxgiFormatFromGpuFormat(desc.render_target_formats[i]); + DXGI_FORMAT format = G_D12_DxgiFormatFromGpuFormat(desc.render_target_formats[i]); if (format != DXGI_FORMAT_UNKNOWN) { pso_desc.RTVFormats[pso_desc.NumRenderTargets++] = format; @@ -562,22 +562,22 
@@ GPU_D12_Pipeline *GPU_D12_PipelineFromDesc(GPU_D12_PipelineDesc desc) //////////////////////////////////////////////////////////// //~ Queue -GPU_D12_Queue *GPU_D12_QueueFromKind(GPU_QueueKind kind) +G_D12_Queue *G_D12_QueueFromKind(G_QueueKind kind) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; + G_D12_SharedState *g = &G_D12_shared_state; return &g->queues[kind]; } //////////////////////////////////////////////////////////// //~ Raw command list -GPU_D12_RawCommandList *GPU_D12_PrepareRawCommandList(GPU_QueueKind queue_kind) +G_D12_RawCommandList *G_D12_PrepareRawCommandList(G_QueueKind queue_kind) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_Queue *queue = GPU_D12_QueueFromKind(queue_kind); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_Queue *queue = G_D12_QueueFromKind(queue_kind); /* Try to pull first completed command list from queue */ - GPU_D12_RawCommandList *cl = ZI; + G_D12_RawCommandList *cl = ZI; { Lock lock = LockE(&queue->commit_mutex); { @@ -601,7 +601,7 @@ GPU_D12_RawCommandList *GPU_D12_PrepareRawCommandList(GPU_QueueKind queue_kind) Arena *perm = PermArena(); { PushAlign(perm, CachelineSize); - cl = PushStruct(perm, GPU_D12_RawCommandList); + cl = PushStruct(perm, G_D12_RawCommandList); PushAlign(perm, CachelineSize); } cl->queue = queue; @@ -624,14 +624,14 @@ GPU_D12_RawCommandList *GPU_D12_PrepareRawCommandList(GPU_QueueKind queue_kind) } /* Initialize Direct queue CPU-only descriptors */ - if (SUCCEEDED(hr) && queue_kind == GPU_QueueKind_Direct) + if (SUCCEEDED(hr) && queue_kind == G_QueueKind_Direct) { - GPU_D12_Arena *gpu_perm = GPU_D12_ArenaFromHandle(GPU_PermArena()); + G_D12_Arena *gpu_perm = G_D12_ArenaFromHandle(G_PermArena()); for (u32 i = 0; i < countof(cl->rtv_descriptors); ++i) { - cl->rtv_descriptors[i] = GPU_D12_PushDescriptor(gpu_perm, GPU_D12_DescriptorHeapKind_Rtv, 0); + cl->rtv_descriptors[i] = G_D12_PushDescriptor(gpu_perm, G_D12_DescriptorHeapKind_Rtv, 0); } - cl->rtv_clear_descriptor = 
GPU_D12_PushDescriptor(gpu_perm, GPU_D12_DescriptorHeapKind_Rtv, 0); + cl->rtv_clear_descriptor = G_D12_PushDescriptor(gpu_perm, G_D12_DescriptorHeapKind_Rtv, 0); } } @@ -665,9 +665,9 @@ GPU_D12_RawCommandList *GPU_D12_PrepareRawCommandList(GPU_QueueKind queue_kind) return cl; } -void GPU_D12_CommitRawCommandList(GPU_D12_RawCommandList *cl) +void G_D12_CommitRawCommandList(G_D12_RawCommandList *cl) { - GPU_D12_Queue *queue = cl->queue; + G_D12_Queue *queue = cl->queue; /* Close */ { @@ -700,21 +700,21 @@ void GPU_D12_CommitRawCommandList(GPU_D12_RawCommandList *cl) //////////////////////////////////////////////////////////// //~ @hookimpl Arena -GPU_ArenaHandle GPU_AcquireArena(void) +G_ArenaHandle G_AcquireArena(void) { - GPU_D12_Arena *gpu_arena = 0; + G_D12_Arena *gpu_arena = 0; { Arena *perm = PermArena(); PushAlign(perm, CachelineSize); - gpu_arena = PushStruct(perm, GPU_D12_Arena); + gpu_arena = PushStruct(perm, G_D12_Arena); PushAlign(perm, CachelineSize); } gpu_arena->arena = AcquireArena(Gibi(1)); - return GPU_D12_MakeHandle(GPU_ArenaHandle, gpu_arena); + return G_D12_MakeHandle(G_ArenaHandle, gpu_arena); } -void GPU_ReleaseArena(GPU_ArenaHandle arena) +void G_ReleaseArena(G_ArenaHandle arena) { /* TODO */ } @@ -722,32 +722,32 @@ void GPU_ReleaseArena(GPU_ArenaHandle arena) //////////////////////////////////////////////////////////// //~ Resource helpers -GPU_D12_Descriptor *GPU_D12_DescriptorFromIndex(GPU_D12_DescriptorHeapKind heap_kind, u32 index) +G_D12_Descriptor *G_D12_DescriptorFromIndex(G_D12_DescriptorHeapKind heap_kind, u32 index) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_DescriptorHeap *heap = &g->descriptor_heaps[heap_kind]; - GPU_D12_Descriptor *descriptors = ArenaFirst(heap->descriptors_arena, GPU_D12_Descriptor); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_DescriptorHeap *heap = &g->descriptor_heaps[heap_kind]; + G_D12_Descriptor *descriptors = ArenaFirst(heap->descriptors_arena, G_D12_Descriptor); return 
&descriptors[index]; } -GPU_D12_Descriptor *GPU_D12_PushDescriptor(GPU_D12_Arena *gpu_arena, GPU_D12_DescriptorHeapKind heap_kind, u32 forced) +G_D12_Descriptor *G_D12_PushDescriptor(G_D12_Arena *gpu_arena, G_D12_DescriptorHeapKind heap_kind, u32 forced) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_DescriptorHeap *heap = &g->descriptor_heaps[heap_kind]; + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_DescriptorHeap *heap = &g->descriptor_heaps[heap_kind]; - GPU_D12_Descriptor *descriptor = 0; + G_D12_Descriptor *descriptor = 0; /* Grab completed descriptor from arena */ if (forced == 0) { - GPU_D12_DescriptorList *descriptors_by_queue = gpu_arena->committed_descriptors_by_heap_and_queue[heap_kind]; - for (GPU_QueueKind queue_kind = 0; !descriptor && queue_kind < GPU_NumQueues; ++queue_kind) + G_D12_DescriptorList *descriptors_by_queue = gpu_arena->committed_descriptors_by_heap_and_queue[heap_kind]; + for (G_QueueKind queue_kind = 0; !descriptor && queue_kind < G_NumQueues; ++queue_kind) { - GPU_D12_DescriptorList *descriptors = &descriptors_by_queue[queue_kind]; + G_D12_DescriptorList *descriptors = &descriptors_by_queue[queue_kind]; descriptor = descriptors->first; if (descriptor) { - GPU_D12_Queue *queue = GPU_D12_QueueFromKind(queue_kind); + G_D12_Queue *queue = G_D12_QueueFromKind(queue_kind); u64 queue_commit_completion = ID3D12Fence_GetCompletedValue(queue->commit_fence); if (queue_commit_completion >= descriptor->queue_commit_target) { @@ -779,12 +779,12 @@ GPU_D12_Descriptor *GPU_D12_PushDescriptor(GPU_D12_Arena *gpu_arena, GPU_D12_Des } else { - u32 descriptors_count = ArenaCount(heap->descriptors_arena, GPU_D12_Descriptor); + u32 descriptors_count = ArenaCount(heap->descriptors_arena, G_D12_Descriptor); if (descriptors_count >= heap->max_count) { Panic(Lit("Max descriptors reached in heap")); } - descriptor = PushStruct(heap->descriptors_arena, GPU_D12_Descriptor); + descriptor = PushStruct(heap->descriptors_arena, 
G_D12_Descriptor); index = descriptors_count; } } @@ -796,14 +796,14 @@ GPU_D12_Descriptor *GPU_D12_PushDescriptor(GPU_D12_Arena *gpu_arena, GPU_D12_Des } /* Push descriptors if index slot is past end of heap */ - u32 descriptors_count = ArenaCount(heap->descriptors_arena, GPU_D12_Descriptor); + u32 descriptors_count = ArenaCount(heap->descriptors_arena, G_D12_Descriptor); if (index >= descriptors_count) { u32 pushed_count = index - descriptors_count + 1; - PushStructs(heap->descriptors_arena, GPU_D12_Descriptor, pushed_count); + PushStructs(heap->descriptors_arena, G_D12_Descriptor, pushed_count); for (u32 pushed_index = descriptors_count; pushed_index < descriptors_count + pushed_count; ++pushed_index) { - GPU_D12_Descriptor *pushed = &(ArenaFirst(heap->descriptors_arena, GPU_D12_Descriptor)[pushed_index]); + G_D12_Descriptor *pushed = &(ArenaFirst(heap->descriptors_arena, G_D12_Descriptor)[pushed_index]); if (pushed_index < index) { pushed->heap = heap; @@ -821,7 +821,7 @@ GPU_D12_Descriptor *GPU_D12_PushDescriptor(GPU_D12_Arena *gpu_arena, GPU_D12_Des /* Search free list for freed descriptor with matching index */ if (!descriptor) { - for (GPU_D12_Descriptor *n = heap->first_free; n; n = n->next) + for (G_D12_Descriptor *n = heap->first_free; n; n = n->next) { if (n->index == index) { @@ -835,7 +835,7 @@ GPU_D12_Descriptor *GPU_D12_PushDescriptor(GPU_D12_Arena *gpu_arena, GPU_D12_Des if (!descriptor) { Arena *perm = PermArena(); - Panic(StringF(perm, "Tried to force push a GPU pointer into slot %F, but a descriptor already exists there (current heap count: %F)", FmtUint(index), FmtUint(ArenaCount(heap->descriptors_arena, GPU_D12_Descriptor)))); + Panic(StringF(perm, "Tried to force push a GPU pointer into slot %F, but a descriptor already exists there (current heap count: %F)", FmtUint(index), FmtUint(ArenaCount(heap->descriptors_arena, G_D12_Descriptor)))); } } } @@ -856,10 +856,10 @@ GPU_D12_Descriptor *GPU_D12_PushDescriptor(GPU_D12_Arena *gpu_arena, 
GPU_D12_Des //- Resource creation -GPU_ResourceHandle GPU_PushBufferResource(GPU_ArenaHandle arena_handle, GPU_BufferResourceDesc desc) +G_ResourceHandle G_PushBufferResource(G_ArenaHandle arena_handle, G_BufferResourceDesc desc) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_Arena *gpu_arena = GPU_D12_ArenaFromHandle(arena_handle); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_Arena *gpu_arena = G_D12_ArenaFromHandle(arena_handle); /* Create resource heap */ if (!gpu_arena->d3d_resource_heap) @@ -897,7 +897,7 @@ GPU_ResourceHandle GPU_PushBufferResource(GPU_ArenaHandle arena_handle, GPU_Buff d3d_desc.MipLevels = 1; d3d_desc.SampleDesc.Count = 1; d3d_desc.SampleDesc.Quality = 0; - d3d_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS * AnyBit(desc.flags, GPU_ResourceFlag_AllowShaderReadWrite); + d3d_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS * AnyBit(desc.flags, G_ResourceFlag_AllowShaderReadWrite); u64 alloc_size = 0; u64 alloc_align = 0; @@ -929,7 +929,7 @@ GPU_ResourceHandle GPU_PushBufferResource(GPU_ArenaHandle arena_handle, GPU_Buff (void **)&d3d_resource); } - GPU_D12_Resource *resource = PushStruct(gpu_arena->arena, GPU_D12_Resource); + G_D12_Resource *resource = PushStruct(gpu_arena->arena, G_D12_Resource); resource->d3d_resource = d3d_resource; resource->uid = Atomic64FetchAdd(&g->resource_creation_gen.v, 1) + 1; resource->flags = desc.flags; @@ -938,14 +938,14 @@ GPU_ResourceHandle GPU_PushBufferResource(GPU_ArenaHandle arena_handle, GPU_Buff resource->buffer_size_aligned = aligned_size; resource->buffer_gpu_address = ID3D12Resource_GetGPUVirtualAddress(d3d_resource); - return GPU_D12_MakeHandle(GPU_ResourceHandle, resource); + return G_D12_MakeHandle(G_ResourceHandle, resource); } -GPU_ResourceHandle GPU_PushTextureResource(GPU_ArenaHandle arena_handle, GPU_TextureResourceDesc desc) +G_ResourceHandle G_PushTextureResource(G_ArenaHandle arena_handle, G_TextureResourceDesc desc) { - GPU_D12_SharedState *g = 
&GPU_D12_shared_state; - GPU_D12_Arena *gpu_arena = GPU_D12_ArenaFromHandle(arena_handle); - D3D12_BARRIER_LAYOUT initial_layout = GPU_D12_BarrierLayoutFromLayout(desc.initial_layout); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_Arena *gpu_arena = G_D12_ArenaFromHandle(arena_handle); + D3D12_BARRIER_LAYOUT initial_layout = G_D12_BarrierLayoutFromLayout(desc.initial_layout); /* Create resource heap */ if (!gpu_arena->d3d_resource_heap) @@ -973,19 +973,19 @@ GPU_ResourceHandle GPU_PushTextureResource(GPU_ArenaHandle arena_handle, GPU_Tex ID3D12Resource *d3d_resource = 0; { D3D12_RESOURCE_DESC1 d3d_desc = ZI; - d3d_desc.Dimension = desc.kind == GPU_TextureKind_1D ? D3D12_RESOURCE_DIMENSION_TEXTURE1D : - desc.kind == GPU_TextureKind_2D ? D3D12_RESOURCE_DIMENSION_TEXTURE2D : + d3d_desc.Dimension = desc.kind == G_TextureKind_1D ? D3D12_RESOURCE_DIMENSION_TEXTURE1D : + desc.kind == G_TextureKind_2D ? D3D12_RESOURCE_DIMENSION_TEXTURE2D : D3D12_RESOURCE_DIMENSION_TEXTURE3D; - d3d_desc.Format = GPU_D12_DxgiFormatFromGpuFormat(desc.format); + d3d_desc.Format = G_D12_DxgiFormatFromGpuFormat(desc.format); d3d_desc.Width = MaxI32(desc.dims.x, 1); d3d_desc.Height = MaxI32(desc.dims.y, 1); d3d_desc.DepthOrArraySize = MaxI32(desc.dims.z, 1); d3d_desc.MipLevels = MaxI32(desc.mip_levels, 1); d3d_desc.SampleDesc.Count = 1; d3d_desc.SampleDesc.Quality = 0; - d3d_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS * AnyBit(desc.flags, GPU_ResourceFlag_AllowShaderReadWrite); - d3d_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET * AnyBit(desc.flags, GPU_ResourceFlag_AllowRenderTarget); - d3d_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL * AnyBit(desc.flags, GPU_ResourceFlag_AllowDepthStencil); + d3d_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS * AnyBit(desc.flags, G_ResourceFlag_AllowShaderReadWrite); + d3d_desc.Flags |= D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET * AnyBit(desc.flags, G_ResourceFlag_AllowRenderTarget); + d3d_desc.Flags |= 
D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL * AnyBit(desc.flags, G_ResourceFlag_AllowDepthStencil); u64 alloc_size = 0; u64 alloc_align = 0; @@ -1025,7 +1025,7 @@ GPU_ResourceHandle GPU_PushTextureResource(GPU_ArenaHandle arena_handle, GPU_Tex (void **)&d3d_resource); } - GPU_D12_Resource *resource = PushStruct(gpu_arena->arena, GPU_D12_Resource); + G_D12_Resource *resource = PushStruct(gpu_arena->arena, G_D12_Resource); resource->d3d_resource = d3d_resource; resource->uid = Atomic64FetchAdd(&g->resource_creation_gen.v, 1) + 1; resource->flags = desc.flags; @@ -1036,29 +1036,29 @@ GPU_ResourceHandle GPU_PushTextureResource(GPU_ArenaHandle arena_handle, GPU_Tex resource->texture_mip_levels = desc.mip_levels; resource->texture_layout = initial_layout; - return GPU_D12_MakeHandle(GPU_ResourceHandle, resource); + return G_D12_MakeHandle(G_ResourceHandle, resource); } -GPU_ResourceHandle GPU_PushSamplerResource(GPU_ArenaHandle arena_handle, GPU_SamplerResourceDesc desc) +G_ResourceHandle G_PushSamplerResource(G_ArenaHandle arena_handle, G_SamplerResourceDesc desc) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_Arena *gpu_arena = GPU_D12_ArenaFromHandle(arena_handle); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_Arena *gpu_arena = G_D12_ArenaFromHandle(arena_handle); - GPU_D12_Resource *resource = PushStruct(gpu_arena->arena, GPU_D12_Resource); + G_D12_Resource *resource = PushStruct(gpu_arena->arena, G_D12_Resource); resource->uid = Atomic64FetchAdd(&g->resource_creation_gen.v, 1) + 1; resource->sampler_desc = desc; - return GPU_D12_MakeHandle(GPU_ResourceHandle, resource); + return G_D12_MakeHandle(G_ResourceHandle, resource); } //////////////////////////////////////////////////////////// //~ @hookimpl Shader resource references -u32 GPU_PushRef(GPU_ArenaHandle arena_handle, GPU_ResourceHandle resource_handle, GPU_RefDesc ref_desc) +u32 G_PushRef(G_ArenaHandle arena_handle, G_ResourceHandle resource_handle, G_RefDesc ref_desc) { - 
GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_Arena *gpu_arena = GPU_D12_ArenaFromHandle(arena_handle); - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(resource_handle); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_Arena *gpu_arena = G_D12_ArenaFromHandle(arena_handle); + G_D12_Resource *resource = G_D12_ResourceFromHandle(resource_handle); u32 result = 0; ShaderRefKind kind = ref_desc.kind; @@ -1083,10 +1083,10 @@ u32 GPU_PushRef(GPU_ArenaHandle arena_handle, GPU_ResourceHandle resource_handle - GPU_D12_Descriptor *descriptor = 0; + G_D12_Descriptor *descriptor = 0; if (is_buffer) { - descriptor = GPU_D12_PushDescriptor(gpu_arena, GPU_D12_DescriptorHeapKind_CbvSrvUav, ref_desc.forced); + descriptor = G_D12_PushDescriptor(gpu_arena, G_D12_DescriptorHeapKind_CbvSrvUav, ref_desc.forced); u64 buffer_size_aligned = resource->buffer_size_aligned; u64 num_elements_in_buffer = buffer_size_aligned / ref_desc.element_size; u64 num_elements_after_offset = num_elements_in_buffer > ref_desc.element_offset ? 
num_elements_in_buffer - ref_desc.element_offset : 0; @@ -1133,7 +1133,7 @@ u32 GPU_PushRef(GPU_ArenaHandle arena_handle, GPU_ResourceHandle resource_handle } else if (is_texture) { - descriptor = GPU_D12_PushDescriptor(gpu_arena, GPU_D12_DescriptorHeapKind_CbvSrvUav, ref_desc.forced); + descriptor = G_D12_PushDescriptor(gpu_arena, G_D12_DescriptorHeapKind_CbvSrvUav, ref_desc.forced); if (is_uav) { ID3D12Device_CreateUnorderedAccessView(g->device, resource->d3d_resource, 0, 0, descriptor->handle); @@ -1145,8 +1145,8 @@ u32 GPU_PushRef(GPU_ArenaHandle arena_handle, GPU_ResourceHandle resource_handle } else if (is_sampler) { - descriptor = GPU_D12_PushDescriptor(gpu_arena, GPU_D12_DescriptorHeapKind_Sampler, ref_desc.forced); - GPU_SamplerResourceDesc sampler_desc = resource->sampler_desc; + descriptor = G_D12_PushDescriptor(gpu_arena, G_D12_DescriptorHeapKind_Sampler, ref_desc.forced); + G_SamplerResourceDesc sampler_desc = resource->sampler_desc; D3D12_SAMPLER_DESC d3d_desc = ZI; { d3d_desc.Filter = (D3D12_FILTER)sampler_desc.filter; @@ -1178,59 +1178,59 @@ u32 GPU_PushRef(GPU_ArenaHandle arena_handle, GPU_ResourceHandle resource_handle //- Count -u64 GPU_CountBufferBytes(GPU_ResourceHandle buffer) +u64 G_CountBufferBytes(G_ResourceHandle buffer) { - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(buffer); + G_D12_Resource *resource = G_D12_ResourceFromHandle(buffer); return resource->buffer_size; } -i32 GPU_Count1D(GPU_ResourceHandle texture) +i32 G_Count1D(G_ResourceHandle texture) { - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(texture); + G_D12_Resource *resource = G_D12_ResourceFromHandle(texture); return resource->texture_dims.x; } -Vec2I32 GPU_Count2D(GPU_ResourceHandle texture) +Vec2I32 G_Count2D(G_ResourceHandle texture) { - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(texture); + G_D12_Resource *resource = G_D12_ResourceFromHandle(texture); return VEC2I32(resource->texture_dims.x, resource->texture_dims.y); } -Vec3I32 
GPU_Count3D(GPU_ResourceHandle texture) +Vec3I32 G_Count3D(G_ResourceHandle texture) { - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(texture); + G_D12_Resource *resource = G_D12_ResourceFromHandle(texture); return resource->texture_dims; } -i32 GPU_CountWidth(GPU_ResourceHandle texture) +i32 G_CountWidth(G_ResourceHandle texture) { - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(texture); + G_D12_Resource *resource = G_D12_ResourceFromHandle(texture); return resource->texture_dims.x; } -i32 GPU_CountHeight(GPU_ResourceHandle texture) +i32 G_CountHeight(G_ResourceHandle texture) { - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(texture); + G_D12_Resource *resource = G_D12_ResourceFromHandle(texture); return resource->texture_dims.y; } -i32 GPU_CountDepth(GPU_ResourceHandle texture) +i32 G_CountDepth(G_ResourceHandle texture) { - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(texture); + G_D12_Resource *resource = G_D12_ResourceFromHandle(texture); return resource->texture_dims.z; } //////////////////////////////////////////////////////////// //~ Command helpers -GPU_D12_Cmd *GPU_D12_PushCmd(GPU_D12_CmdList *cl) +G_D12_Cmd *G_D12_PushCmd(G_D12_CmdList *cl) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; + G_D12_SharedState *g = &G_D12_shared_state; /* Grab chunk */ - GPU_D12_CmdChunk *chunk = cl->last_cmd_chunk; + G_D12_CmdChunk *chunk = cl->last_cmd_chunk; { - if (chunk && chunk->cmds_count >= GPU_D12_CmdsPerChunk) + if (chunk && chunk->cmds_count >= G_D12_CmdsPerChunk) { chunk = 0; } @@ -1247,7 +1247,7 @@ GPU_D12_Cmd *GPU_D12_PushCmd(GPU_D12_CmdList *cl) Unlock(&lock); if (chunk) { - GPU_D12_Cmd *cmds = chunk->cmds; + G_D12_Cmd *cmds = chunk->cmds; ZeroStruct(chunk); chunk->cmds = cmds; } @@ -1255,8 +1255,8 @@ GPU_D12_Cmd *GPU_D12_PushCmd(GPU_D12_CmdList *cl) if (!chunk) { Arena *perm = PermArena(); - chunk = PushStruct(perm, GPU_D12_CmdChunk); - chunk->cmds = PushStructsNoZero(perm, GPU_D12_Cmd, GPU_D12_CmdsPerChunk); + 
chunk = PushStruct(perm, G_D12_CmdChunk); + chunk->cmds = PushStructsNoZero(perm, G_D12_Cmd, G_D12_CmdsPerChunk); } if (chunk != cl->last_cmd_chunk) { @@ -1265,30 +1265,30 @@ GPU_D12_Cmd *GPU_D12_PushCmd(GPU_D12_CmdList *cl) } /* Push cmd to chunk */ - GPU_D12_Cmd *cmd = &chunk->cmds[chunk->cmds_count++]; + G_D12_Cmd *cmd = &chunk->cmds[chunk->cmds_count++]; ++cl->cmds_count; return cmd; } -GPU_D12_Cmd *GPU_D12_PushConstCmd(GPU_D12_CmdList *cl, i32 slot, void *v) +G_D12_Cmd *G_D12_PushConstCmd(G_D12_CmdList *cl, i32 slot, void *v) { - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = GPU_D12_CmdKind_Constant; + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_Constant; cmd->constant.slot = slot; CopyBytes(&cmd->constant.value, v, 4); return cmd; } -GPU_D12_StagingRegionNode *GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 size) +G_D12_StagingRegionNode *G_D12_PushStagingRegion(G_D12_CmdList *cl, u64 size) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_QueueKind queue_kind = cl->queue_kind; - GPU_D12_Queue *queue = GPU_D12_QueueFromKind(queue_kind); - GPU_D12_StagingRegionNode *result = 0; + G_D12_SharedState *g = &G_D12_shared_state; + G_QueueKind queue_kind = cl->queue_kind; + G_D12_Queue *queue = G_D12_QueueFromKind(queue_kind); + G_D12_StagingRegionNode *result = 0; Lock lock = LockE(&queue->staging_mutex); { - GPU_D12_StagingHeap *heap = queue->staging_heap; + G_D12_StagingHeap *heap = queue->staging_heap; i64 completed = ID3D12Fence_GetCompletedValue(queue->commit_fence); /* Find first completed region with matching size. 
@@ -1309,16 +1309,16 @@ GPU_D12_StagingRegionNode *GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 si * u64/i64 max until cl submission actually sets value */ /* Find region with large enough size */ - GPU_D12_StagingRegionNode *match = 0; + G_D12_StagingRegionNode *match = 0; if (heap && heap->size >= size) { - GPU_D12_StagingRegionNode *r = heap->head_region_node; + G_D12_StagingRegionNode *r = heap->head_region_node; for (;;) { b32 is_completed = completed >= Atomic64Fetch(&r->completion_target); if (is_completed) { - GPU_D12_StagingRegionNode *next = r->next; + G_D12_StagingRegionNode *next = r->next; u64 region_size = 0; if (next->pos > r->pos) { @@ -1331,7 +1331,7 @@ GPU_D12_StagingRegionNode *GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 si if (region_size < size) { - GPU_D12_StagingRegionNode *prev = r->prev; + G_D12_StagingRegionNode *prev = r->prev; b32 prev_is_completed = completed >= Atomic64Fetch(&prev->completion_target); if (prev_is_completed && prev->pos < r->pos) { @@ -1376,7 +1376,7 @@ GPU_D12_StagingRegionNode *GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 si /* Create new heap */ { Arena *arena = AcquireArena(Gibi(1)); - heap = PushStruct(arena, GPU_D12_StagingHeap); + heap = PushStruct(arena, G_D12_StagingHeap); heap->arena = arena; heap->size = new_heap_size; @@ -1429,7 +1429,7 @@ GPU_D12_StagingRegionNode *GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 si } /* Create initial region */ - match = PushStruct(heap->arena, GPU_D12_StagingRegionNode); + match = PushStruct(heap->arena, G_D12_StagingRegionNode); match->heap = heap; match->next = match; match->prev = match; @@ -1438,7 +1438,7 @@ GPU_D12_StagingRegionNode *GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 si /* Split extra region space */ { - GPU_D12_StagingRegionNode *next = match->next; + G_D12_StagingRegionNode *next = match->next; u64 region_size = 0; if (next->pos > match->pos) { @@ -1451,14 +1451,14 @@ GPU_D12_StagingRegionNode 
*GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 si if (region_size > size) { - GPU_D12_StagingRegionNode *new_next = heap->first_free_region_node; + G_D12_StagingRegionNode *new_next = heap->first_free_region_node; if (new_next) { SllStackPop(heap->first_free_region_node); } else { - new_next = PushStruct(heap->arena, GPU_D12_StagingRegionNode); + new_next = PushStruct(heap->arena, G_D12_StagingRegionNode); } new_next->next = next; new_next->prev = match; @@ -1483,10 +1483,10 @@ GPU_D12_StagingRegionNode *GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 si //- Command list -GPU_CommandListHandle GPU_PrepareCommandList(GPU_QueueKind queue) +G_CommandListHandle G_PrepareCommandList(G_QueueKind queue) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_CmdList *cl = 0; + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_CmdList *cl = 0; Lock lock = LockE(&g->free_cmd_lists_mutex); { cl = g->first_free_cmd_list; @@ -1498,37 +1498,37 @@ GPU_CommandListHandle GPU_PrepareCommandList(GPU_QueueKind queue) else { Arena *perm = PermArena(); - cl = PushStruct(perm, GPU_D12_CmdList); + cl = PushStruct(perm, G_D12_CmdList); } } Unlock(&lock); cl->queue_kind = queue; - return GPU_D12_MakeHandle(GPU_CommandListHandle, cl); + return G_D12_MakeHandle(G_CommandListHandle, cl); } -void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_count, GPU_FenceOp *fence_ops) +void G_CommitCommandListEx(G_CommandListHandle cl_handle, u64 fence_ops_count, G_FenceOp *fence_ops) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_QueueKind queue_kind = cl->queue_kind; - GPU_D12_Queue *queue = GPU_D12_QueueFromKind(queue_kind); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_QueueKind queue_kind = cl->queue_kind; + G_D12_Queue *queue = G_D12_QueueFromKind(queue_kind); TempArena scratch = BeginScratchNoConflict(); /* Begin dx12 command 
list */ - GPU_D12_RawCommandList *rcl = GPU_D12_PrepareRawCommandList(queue_kind); + G_D12_RawCommandList *rcl = G_D12_PrepareRawCommandList(queue_kind); ID3D12GraphicsCommandList7 *d3d_cl = rcl->d3d_cl; /* Pipeline state */ b32 graphics_rootsig_set = 0; b32 compute_rootsig_set = 0; b32 descriptor_heaps_set = 0; - GPU_D12_Pipeline *bound_pipeline = 0; + G_D12_Pipeline *bound_pipeline = 0; /* Constants state */ - u64 slotted_constants[GPU_D12_NumShaderConstants]; - u64 bound_compute_constants[GPU_D12_NumShaderConstants]; - u64 bound_graphics_constants[GPU_D12_NumShaderConstants]; + u64 slotted_constants[G_D12_NumShaderConstants]; + u64 bound_compute_constants[G_D12_NumShaderConstants]; + u64 bound_graphics_constants[G_D12_NumShaderConstants]; for (i32 i = 0; i < countof(slotted_constants); ++i) { slotted_constants[i] = 0; } /* Zero initialze all constant slots */ for (i32 i = 0; i < countof(bound_compute_constants); ++i) { bound_compute_constants[i] = U64Max; } for (i32 i = 0; i < countof(bound_graphics_constants); ++i) { bound_graphics_constants[i] = U64Max; } @@ -1538,16 +1538,16 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun D3D12_RECT bound_scissor = ZI; D3D_PRIMITIVE_TOPOLOGY bound_primitive_topology = -1; D3D12_INDEX_BUFFER_VIEW bound_ibv = ZI; - u64 bound_render_target_uids[GPU_MaxRenderTargets] = ZI; + u64 bound_render_target_uids[G_MaxRenderTargets] = ZI; u64 bound_render_clear_target_uid = 0; /* Flatten command chunks */ u64 cmds_count = 0; - GPU_D12_Cmd *cmds = PushStructsNoZero(scratch.arena, GPU_D12_Cmd, cl->cmds_count); + G_D12_Cmd *cmds = PushStructsNoZero(scratch.arena, G_D12_Cmd, cl->cmds_count); { /* Flatten command chunks */ { - for (GPU_D12_CmdChunk *chunk = cl->first_cmd_chunk; chunk; chunk = chunk->next) + for (G_D12_CmdChunk *chunk = cl->first_cmd_chunk; chunk; chunk = chunk->next) { for (u64 cmd_chunk_idx = 0; cmd_chunk_idx < chunk->cmds_count; ++cmd_chunk_idx) { @@ -1559,7 +1559,7 @@ void 
GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun { Lock lock = LockE(&g->free_cmd_chunks_mutex); { - for (GPU_D12_CmdChunk *chunk = cl->first_cmd_chunk; chunk; chunk = chunk->next) + for (G_D12_CmdChunk *chunk = cl->first_cmd_chunk; chunk; chunk = chunk->next) { chunk->next = g->first_free_cmd_chunk; g->first_free_cmd_chunk = chunk; @@ -1573,10 +1573,10 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun { u64 cmd_idx = 0; u64 batch_gen = 0; - GPU_D12_Cmd *prev_barrier_cmd = 0; + G_D12_Cmd *prev_barrier_cmd = 0; while (cmd_idx < cmds_count) { - GPU_D12_Cmd *cmd = &cmds[cmd_idx]; + G_D12_Cmd *cmd = &cmds[cmd_idx]; switch (cmd->kind) { /* Batch-interrupting cmds */ @@ -1587,12 +1587,12 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun } break; /* Non-batch-interrupting cmds */ - case GPU_D12_CmdKind_Constant: + case G_D12_CmdKind_Constant: { cmd_idx += 1; } break; - case GPU_D12_CmdKind_Barrier: + case G_D12_CmdKind_Barrier: { /* Determine 'before' state from lookup */ if (prev_barrier_cmd) @@ -1624,7 +1624,7 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun u64 cmd_idx = 0; while (cmd_idx < cmds_count) { - GPU_D12_Cmd *cmd = &cmds[cmd_idx]; + G_D12_Cmd *cmd = &cmds[cmd_idx]; switch (cmd->kind) { default: @@ -1633,7 +1633,7 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun } break; //- Constant - case GPU_D12_CmdKind_Constant: + case G_D12_CmdKind_Constant: { i32 slot = cmd->constant.slot; u32 value = cmd->constant.value; @@ -1646,7 +1646,7 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun //- Barrier - case GPU_D12_CmdKind_Barrier: + case G_D12_CmdKind_Barrier: { batch_barrier_idx_opl = cmd_idx + 1; @@ -1662,23 +1662,23 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun D3D12_GLOBAL_BARRIER *global_barriers = PushStructs(scratch.arena, 
D3D12_GLOBAL_BARRIER, (batch_barrier_idx_opl - batch_barrier_idx_start)); for (u64 barrier_cmd_idx = batch_barrier_idx_start; barrier_cmd_idx < batch_barrier_idx_opl; ++barrier_cmd_idx) { - GPU_D12_Cmd *barrier_cmd = &cmds[barrier_cmd_idx]; - if (barrier_cmd->kind == GPU_D12_CmdKind_Barrier) + G_D12_Cmd *barrier_cmd = &cmds[barrier_cmd_idx]; + if (barrier_cmd->kind == G_D12_CmdKind_Barrier) { - GPU_BarrierDesc desc = barrier_cmd->barrier.desc; - GPU_D12_Resource *resource = GPU_D12_ResourceFromHandle(desc.resource); + G_BarrierDesc desc = barrier_cmd->barrier.desc; + G_D12_Resource *resource = G_D12_ResourceFromHandle(desc.resource); D3D12_BARRIER_TYPE barrier_type = resource->is_texture ? D3D12_BARRIER_TYPE_TEXTURE : D3D12_BARRIER_TYPE_BUFFER; /* Translate gpu barrier kind -> d3d barrier fields */ - D3D12_BARRIER_SYNC sync_before = GPU_D12_BarrierSyncFromStages(desc.sync_prev); - D3D12_BARRIER_SYNC sync_after = GPU_D12_BarrierSyncFromStages(desc.sync_next); - D3D12_BARRIER_ACCESS access_before = GPU_D12_BarrierAccessFromAccesses(desc.access_prev); - D3D12_BARRIER_ACCESS access_after = GPU_D12_BarrierAccessFromAccesses(desc.access_next); + D3D12_BARRIER_SYNC sync_before = G_D12_BarrierSyncFromStages(desc.sync_prev); + D3D12_BARRIER_SYNC sync_after = G_D12_BarrierSyncFromStages(desc.sync_next); + D3D12_BARRIER_ACCESS access_before = G_D12_BarrierAccessFromAccesses(desc.access_prev); + D3D12_BARRIER_ACCESS access_after = G_D12_BarrierAccessFromAccesses(desc.access_next); D3D12_BARRIER_LAYOUT layout_before = resource->texture_layout; D3D12_BARRIER_LAYOUT layout_after = resource->texture_layout; - if (desc.layout != GPU_Layout_NoChange) + if (desc.layout != G_Layout_NoChange) { - layout_after = GPU_D12_BarrierLayoutFromLayout(desc.layout); + layout_after = G_D12_BarrierLayoutFromLayout(desc.layout); resource->texture_layout = layout_after; } @@ -1761,7 +1761,7 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun //- Copy bytes - case 
GPU_D12_CmdKind_CopyBytes: + case G_D12_CmdKind_CopyBytes: { u64 src_offset = cmd->copy_bytes.src_copy_range.min; u64 copy_size = cmd->copy_bytes.src_copy_range.max - cmd->copy_bytes.src_copy_range.min; @@ -1776,10 +1776,10 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun //- Copy texels - case GPU_D12_CmdKind_CopyTexels: + case G_D12_CmdKind_CopyTexels: { - GPU_D12_Resource *dst = cmd->copy_texels.dst; - GPU_D12_Resource *src = cmd->copy_texels.src; + G_D12_Resource *dst = cmd->copy_texels.dst; + G_D12_Resource *src = cmd->copy_texels.src; D3D12_TEXTURE_COPY_LOCATION dst_loc = cmd->copy_texels.dst_loc; D3D12_TEXTURE_COPY_LOCATION src_loc = cmd->copy_texels.src_loc; Vec3I32 dst_offset = cmd->copy_texels.dst_offset; @@ -1795,7 +1795,7 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun src_box.back = src_copy_range.p1.z; } - if (dst->flags & GPU_ResourceFlag_AllowDepthStencil) + if (dst->flags & G_ResourceFlag_AllowDepthStencil) { /* Depth-stencil textures must have src box & dst offset set to 0 * https://learn.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12graphicscommandlist-copytextureregion @@ -1812,13 +1812,13 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun //- Compute - case GPU_D12_CmdKind_Compute: + case G_D12_CmdKind_Compute: { - GPU_D12_Pipeline *pipeline = 0; + G_D12_Pipeline *pipeline = 0; { - GPU_D12_PipelineDesc pipeline_desc = ZI; + G_D12_PipelineDesc pipeline_desc = ZI; pipeline_desc.cs = cmd->compute.cs; - pipeline = GPU_D12_PipelineFromDesc(pipeline_desc); + pipeline = G_D12_PipelineFromDesc(pipeline_desc); } if (pipeline) @@ -1827,8 +1827,8 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun if (!descriptor_heaps_set) { ID3D12DescriptorHeap *heaps[] = { - g->descriptor_heaps[GPU_D12_DescriptorHeapKind_CbvSrvUav].d3d_heap, - g->descriptor_heaps[GPU_D12_DescriptorHeapKind_Sampler].d3d_heap, + 
g->descriptor_heaps[G_D12_DescriptorHeapKind_CbvSrvUav].d3d_heap, + g->descriptor_heaps[G_D12_DescriptorHeapKind_Sampler].d3d_heap, }; ID3D12GraphicsCommandList_SetDescriptorHeaps(d3d_cl, countof(heaps), heaps); descriptor_heaps_set = 1; @@ -1867,11 +1867,11 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun //- Rasterize - case GPU_D12_CmdKind_Rasterize: + case G_D12_CmdKind_Rasterize: { - GPU_D12_Pipeline *pipeline = 0; + G_D12_Pipeline *pipeline = 0; { - GPU_D12_PipelineDesc pipeline_desc = ZI; + G_D12_PipelineDesc pipeline_desc = ZI; pipeline_desc.vs = cmd->rasterize.vs; pipeline_desc.ps = cmd->rasterize.ps; { @@ -1879,42 +1879,42 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun switch (cmd->rasterize.mode) { default: Assert(0); break; - case GPU_RasterMode_PointList: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT; break; - case GPU_RasterMode_LineList: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE; break; - case GPU_RasterMode_LineStrip: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE; break; - case GPU_RasterMode_TriangleList: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; break; - case GPU_RasterMode_TriangleStrip: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; break; - case GPU_RasterMode_WireTriangleList: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; break; - case GPU_RasterMode_WireTriangleStrip: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; break; + case G_RasterMode_PointList: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT; break; + case G_RasterMode_LineList: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE; break; + case G_RasterMode_LineStrip: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE; break; + case G_RasterMode_TriangleList: pipeline_desc.topology_type = 
D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; break; + case G_RasterMode_TriangleStrip: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; break; + case G_RasterMode_WireTriangleList: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; break; + case G_RasterMode_WireTriangleStrip: pipeline_desc.topology_type = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE; break; } } - if (cmd->rasterize.mode == GPU_RasterMode_WireTriangleList || cmd->rasterize.mode == GPU_RasterMode_WireTriangleStrip) + if (cmd->rasterize.mode == G_RasterMode_WireTriangleList || cmd->rasterize.mode == G_RasterMode_WireTriangleStrip) { pipeline_desc.is_wireframe = 1; } for (u32 i = 0; i < countof(cmd->rasterize.render_targets); ++i) { - GPU_D12_Resource *rt = cmd->rasterize.render_targets[i]; + G_D12_Resource *rt = cmd->rasterize.render_targets[i]; if (rt) { pipeline_desc.render_target_formats[i] = rt->texture_format; } else { - pipeline_desc.render_target_formats[i] = GPU_Format_Unknown; + pipeline_desc.render_target_formats[i] = G_Format_Unknown; } } - pipeline = GPU_D12_PipelineFromDesc(pipeline_desc); + pipeline = G_D12_PipelineFromDesc(pipeline_desc); } /* Create ibv */ u32 indices_count = 0; D3D12_INDEX_BUFFER_VIEW ibv = ZI; { - GPU_IndexBufferDesc desc = cmd->rasterize.index_buffer_desc; + G_IndexBufferDesc desc = cmd->rasterize.index_buffer_desc; if (desc.index_count > 0) { - GPU_D12_Resource *index_buffer_resource = GPU_D12_ResourceFromHandle(desc.resource); + G_D12_Resource *index_buffer_resource = G_D12_ResourceFromHandle(desc.resource); ibv.BufferLocation = index_buffer_resource->buffer_gpu_address; ibv.SizeInBytes = desc.index_size * desc.index_count; if (desc.index_size == 2) @@ -1941,8 +1941,8 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun if (!descriptor_heaps_set) { ID3D12DescriptorHeap *heaps[] = { - g->descriptor_heaps[GPU_D12_DescriptorHeapKind_CbvSrvUav].d3d_heap, - 
g->descriptor_heaps[GPU_D12_DescriptorHeapKind_Sampler].d3d_heap, + g->descriptor_heaps[G_D12_DescriptorHeapKind_CbvSrvUav].d3d_heap, + g->descriptor_heaps[G_D12_DescriptorHeapKind_Sampler].d3d_heap, }; ID3D12GraphicsCommandList_SetDescriptorHeaps(d3d_cl, countof(heaps), heaps); descriptor_heaps_set = 1; @@ -2014,13 +2014,13 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun switch (cmd->rasterize.mode) { default: Assert(0); break; - case GPU_RasterMode_PointList: topology = D3D_PRIMITIVE_TOPOLOGY_POINTLIST; break; - case GPU_RasterMode_LineList: topology = D3D_PRIMITIVE_TOPOLOGY_LINELIST; break; - case GPU_RasterMode_LineStrip: topology = D3D_PRIMITIVE_TOPOLOGY_LINESTRIP; break; - case GPU_RasterMode_TriangleList: topology = D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST; break; - case GPU_RasterMode_TriangleStrip: topology = D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP; break; - case GPU_RasterMode_WireTriangleList: topology = D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST; break; - case GPU_RasterMode_WireTriangleStrip: topology = D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP; break; + case G_RasterMode_PointList: topology = D3D_PRIMITIVE_TOPOLOGY_POINTLIST; break; + case G_RasterMode_LineList: topology = D3D_PRIMITIVE_TOPOLOGY_LINELIST; break; + case G_RasterMode_LineStrip: topology = D3D_PRIMITIVE_TOPOLOGY_LINESTRIP; break; + case G_RasterMode_TriangleList: topology = D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST; break; + case G_RasterMode_TriangleStrip: topology = D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP; break; + case G_RasterMode_WireTriangleList: topology = D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST; break; + case G_RasterMode_WireTriangleStrip: topology = D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP; break; } if (topology != bound_primitive_topology) { @@ -2041,12 +2041,12 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun u32 rtvs_count = 0; for (u32 i = 0; i < countof(cmd->rasterize.render_targets); ++i) { - GPU_D12_Resource *rt = 
cmd->rasterize.render_targets[i]; + G_D12_Resource *rt = cmd->rasterize.render_targets[i]; if (rt) { if (bound_render_target_uids[i] != rt->uid) { - GPU_D12_Descriptor *rtv_descriptor = rcl->rtv_descriptors[i]; + G_D12_Descriptor *rtv_descriptor = rcl->rtv_descriptors[i]; ID3D12Device_CreateRenderTargetView(g->device, rt->d3d_resource, 0, rtv_descriptor->handle); bound_render_target_uids[i] = rt->uid; om_dirty = 1; @@ -2060,7 +2060,7 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun } if (om_dirty) { - D3D12_CPU_DESCRIPTOR_HANDLE rtv_handles[GPU_MaxRenderTargets] = ZI; + D3D12_CPU_DESCRIPTOR_HANDLE rtv_handles[G_MaxRenderTargets] = ZI; for (u32 i = 0; i < rtvs_count; ++i) { rtv_handles[i] = rcl->rtv_descriptors[i]->handle; @@ -2078,9 +2078,9 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun //- Clear rtv - case GPU_D12_CmdKind_ClearRtv: + case G_D12_CmdKind_ClearRtv: { - GPU_D12_Resource *rt = cmd->clear_rtv.render_target; + G_D12_Resource *rt = cmd->clear_rtv.render_target; f32 clear_color[4] = ZI; { clear_color[0] = cmd->clear_rtv.color.x; @@ -2102,7 +2102,7 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun } /* End dx12 command list */ - GPU_D12_CommitRawCommandList(rcl); + G_D12_CommitRawCommandList(rcl); /* Free command list */ { @@ -2119,9 +2119,9 @@ void GPU_CommitCommandListEx(GPU_CommandListHandle cl_handle, u64 fence_ops_coun //- Arena -void GPU_ResetArena(GPU_CommandListHandle cl_handle, GPU_ArenaHandle arena_handle) +void G_ResetArena(G_CommandListHandle cl_handle, G_ArenaHandle arena_handle) { - GPU_D12_Arena *gpu_arena = GPU_D12_ArenaFromHandle(arena_handle); + G_D12_Arena *gpu_arena = G_D12_ArenaFromHandle(arena_handle); /* TODO */ @@ -2133,24 +2133,24 @@ void GPU_ResetArena(GPU_CommandListHandle cl_handle, GPU_ArenaHandle arena_handl //- Cpu -> Gpu copy -void GPU_CopyCpuToBuffer(GPU_CommandListHandle cl_handle, GPU_ResourceHandle dst_handle, u64 
dst_offset, void *src, RngU64 src_copy_range) +void G_CopyCpuToBuffer(G_CommandListHandle cl_handle, G_ResourceHandle dst_handle, u64 dst_offset, void *src, RngU64 src_copy_range) { - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); u64 copy_size = src_copy_range.max - src_copy_range.min; - GPU_D12_StagingRegionNode *region = GPU_D12_PushStagingRegion(cl, copy_size); + G_D12_StagingRegionNode *region = G_D12_PushStagingRegion(cl, copy_size); CopyBytes((u8 *)region->heap->mapped + region->pos, (u8 *)src + src_copy_range.min, copy_size); - GPU_CopyBufferToBuffer(cl_handle, + G_CopyBufferToBuffer(cl_handle, dst_handle, dst_offset, - GPU_D12_MakeHandle(GPU_ResourceHandle, ®ion->heap->resource), + G_D12_MakeHandle(G_ResourceHandle, ®ion->heap->resource), RNGU64(region->pos, copy_size)); } -void GPU_CopyCpuToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandle dst_handle, Vec3I32 dst_offset, void *src, Vec3I32 src_dims, Rng3I32 src_copy_range) +void G_CopyCpuToTexture(G_CommandListHandle cl_handle, G_ResourceHandle dst_handle, Vec3I32 dst_offset, void *src, Vec3I32 src_dims, Rng3I32 src_copy_range) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Resource *dst = GPU_D12_ResourceFromHandle(dst_handle); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Resource *dst = G_D12_ResourceFromHandle(dst_handle); Assert(dst->is_texture); Vec3I32 staged_dims = ZI; @@ -2177,7 +2177,7 @@ void GPU_CopyCpuToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandle ds } /* Fill staging buffer */ - GPU_D12_StagingRegionNode *region = GPU_D12_PushStagingRegion(cl, footprint_size); + G_D12_StagingRegionNode *region = G_D12_PushStagingRegion(cl, footprint_size); { D3D12_RANGE read_range = ZI; u8 *dst_base = (u8 *)region->heap->mapped + region->pos + footprint.Offset; @@ -2195,31 
+2195,31 @@ void GPU_CopyCpuToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandle ds } } - GPU_CopyBufferToTexture(cl_handle, + G_CopyBufferToTexture(cl_handle, dst_handle, dst_offset, - GPU_D12_MakeHandle(GPU_ResourceHandle, ®ion->heap->resource), staged_dims, + G_D12_MakeHandle(G_ResourceHandle, ®ion->heap->resource), staged_dims, RNG3I32(VEC3I32(0, 0, 0), staged_dims)); } //- Gpu <-> Gpu copy -void GPU_CopyBufferToBuffer(GPU_CommandListHandle cl_handle, GPU_ResourceHandle dst_handle, u64 dst_offset, GPU_ResourceHandle src_handle, RngU64 src_copy_range) +void G_CopyBufferToBuffer(G_CommandListHandle cl_handle, G_ResourceHandle dst_handle, u64 dst_offset, G_ResourceHandle src_handle, RngU64 src_copy_range) { - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = GPU_D12_CmdKind_CopyBytes; - cmd->copy_bytes.dst = GPU_D12_ResourceFromHandle(dst_handle); - cmd->copy_bytes.src = GPU_D12_ResourceFromHandle(src_handle); + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_CopyBytes; + cmd->copy_bytes.dst = G_D12_ResourceFromHandle(dst_handle); + cmd->copy_bytes.src = G_D12_ResourceFromHandle(src_handle); cmd->copy_bytes.dst_offset = dst_offset; cmd->copy_bytes.src_copy_range = src_copy_range; } -void GPU_CopyBufferToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandle dst_handle, Vec3I32 dst_offset, GPU_ResourceHandle src_handle, Vec3I32 src_dims, Rng3I32 src_copy_range) +void G_CopyBufferToTexture(G_CommandListHandle cl_handle, G_ResourceHandle dst_handle, Vec3I32 dst_offset, G_ResourceHandle src_handle, Vec3I32 src_dims, Rng3I32 src_copy_range) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Resource *dst = GPU_D12_ResourceFromHandle(dst_handle); - GPU_D12_Resource *src = GPU_D12_ResourceFromHandle(src_handle); + G_D12_SharedState *g = 
&G_D12_shared_state; + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Resource *dst = G_D12_ResourceFromHandle(dst_handle); + G_D12_Resource *src = G_D12_ResourceFromHandle(src_handle); Assert(dst->is_texture); Assert(!src->is_texture); @@ -2249,8 +2249,8 @@ void GPU_CopyBufferToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandle src_loc.PlacedFootprint = footprint; } - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = GPU_D12_CmdKind_CopyTexels; + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_CopyTexels; cmd->copy_texels.dst = dst; cmd->copy_texels.src = src; cmd->copy_texels.dst_loc = dst_loc; @@ -2259,12 +2259,12 @@ void GPU_CopyBufferToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandle cmd->copy_texels.src_copy_range = src_copy_range; } -void GPU_CopyTextureToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandle dst_handle, Vec3I32 dst_offset, GPU_ResourceHandle src_handle, Rng3I32 src_copy_range) +void G_CopyTextureToTexture(G_CommandListHandle cl_handle, G_ResourceHandle dst_handle, Vec3I32 dst_offset, G_ResourceHandle src_handle, Rng3I32 src_copy_range) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Resource *dst = GPU_D12_ResourceFromHandle(dst_handle); - GPU_D12_Resource *src = GPU_D12_ResourceFromHandle(src_handle); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Resource *dst = G_D12_ResourceFromHandle(dst_handle); + G_D12_Resource *src = G_D12_ResourceFromHandle(src_handle); Assert(dst->is_texture); Assert(src->is_texture); @@ -2281,8 +2281,8 @@ void GPU_CopyTextureToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandl src_loc.SubresourceIndex = 0; } - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = GPU_D12_CmdKind_CopyTexels; + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_CopyTexels; cmd->copy_texels.dst = dst; 
cmd->copy_texels.src = src; cmd->copy_texels.dst_loc = dst_loc; @@ -2291,7 +2291,7 @@ void GPU_CopyTextureToTexture(GPU_CommandListHandle cl_handle, GPU_ResourceHandl cmd->copy_texels.src_copy_range = src_copy_range; } -void GPU_CopyTextureToBuffer(GPU_CommandListHandle cl_handle, GPU_ResourceHandle dst_handle, Vec3I32 dst_offset, GPU_ResourceHandle src_handle, Rng3I32 src_copy_range) +void G_CopyTextureToBuffer(G_CommandListHandle cl_handle, G_ResourceHandle dst_handle, Vec3I32 dst_offset, G_ResourceHandle src_handle, Rng3I32 src_copy_range) { /* TODO */ Assert(0); @@ -2299,55 +2299,55 @@ void GPU_CopyTextureToBuffer(GPU_CommandListHandle cl_handle, GPU_ResourceHandle //- Constant -void GPU_SetConstant_(GPU_CommandListHandle cl_handle, i32 slot, void *src_32bit, u32 size) +void G_SetConstant_(G_CommandListHandle cl_handle, i32 slot, void *src_32bit, u32 size) { - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = GPU_D12_CmdKind_Constant; + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_Constant; cmd->constant.slot = slot; CopyBytes(&cmd->constant.value, src_32bit, MinU32(size, 4)); } //- Barrier -void GPU_Sync(GPU_CommandListHandle cl_handle, GPU_BarrierDesc desc) +void G_Sync(G_CommandListHandle cl_handle, G_BarrierDesc desc) { - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = GPU_D12_CmdKind_Barrier; + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_Barrier; cmd->barrier.desc = desc; } //- Compute -void GPU_Compute(GPU_CommandListHandle cl_handle, ComputeShader cs, Vec3I32 groups) +void G_Compute(G_CommandListHandle cl_handle, ComputeShader cs, Vec3I32 groups) { - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = 
GPU_D12_CmdKind_Compute; + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_Compute; cmd->compute.cs = cs; cmd->compute.groups = groups; } //- Rasterize -void GPU_Rasterize(GPU_CommandListHandle cl_handle, +void G_Rasterize(G_CommandListHandle cl_handle, VertexShader vs, PixelShader ps, - u32 instances_count, GPU_IndexBufferDesc index_buffer, - u32 render_targets_count, GPU_ResourceHandle *render_targets, + u32 instances_count, G_IndexBufferDesc index_buffer, + u32 render_targets_count, G_ResourceHandle *render_targets, Rng3 viewport, Rng2 scissor, - GPU_RasterMode mode) + G_RasterMode mode) { - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = GPU_D12_CmdKind_Rasterize; + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_Rasterize; cmd->rasterize.vs = vs; cmd->rasterize.ps = ps; cmd->rasterize.instances_count = instances_count; cmd->rasterize.index_buffer_desc = index_buffer; - for (u32 i = 0; i < MinU32(render_targets_count, GPU_MaxRenderTargets); ++i) + for (u32 i = 0; i < MinU32(render_targets_count, G_MaxRenderTargets); ++i) { - cmd->rasterize.render_targets[i] = GPU_D12_ResourceFromHandle(render_targets[i]); + cmd->rasterize.render_targets[i] = G_D12_ResourceFromHandle(render_targets[i]); } cmd->rasterize.viewport = viewport; cmd->rasterize.scissor = scissor; @@ -2356,24 +2356,24 @@ void GPU_Rasterize(GPU_CommandListHandle cl_handle, //- Clear -void GPU_ClearRenderTarget(GPU_CommandListHandle cl_handle, GPU_ResourceHandle resource_handle, Vec4 color) +void G_ClearRenderTarget(G_CommandListHandle cl_handle, G_ResourceHandle resource_handle, Vec4 color) { - GPU_D12_CmdList *cl = GPU_D12_CmdListFromHandle(cl_handle); - GPU_D12_Cmd *cmd = GPU_D12_PushCmd(cl); - cmd->kind = GPU_D12_CmdKind_ClearRtv; - cmd->clear_rtv.render_target = 
GPU_D12_ResourceFromHandle(resource_handle); + G_D12_CmdList *cl = G_D12_CmdListFromHandle(cl_handle); + G_D12_Cmd *cmd = G_D12_PushCmd(cl); + cmd->kind = G_D12_CmdKind_ClearRtv; + cmd->clear_rtv.render_target = G_D12_ResourceFromHandle(resource_handle); cmd->clear_rtv.color = color; } //////////////////////////////////////////////////////////// //~ @hookimpl Queue synchronization -void GPU_SyncQueue(GPU_QueueKind completion_queue_kind, GPU_QueueKind waiter_queue_kind) +void G_SyncQueue(G_QueueKind completion_queue_kind, G_QueueKind waiter_queue_kind) { if (completion_queue_kind != waiter_queue_kind) { - GPU_D12_Queue *completion_queue = GPU_D12_QueueFromKind(completion_queue_kind); - GPU_D12_Queue *waiter_queue = GPU_D12_QueueFromKind(waiter_queue_kind); + G_D12_Queue *completion_queue = G_D12_QueueFromKind(completion_queue_kind); + G_D12_Queue *waiter_queue = G_D12_QueueFromKind(waiter_queue_kind); ID3D12Fence *d3d_fence = completion_queue->commit_fence; u64 fence_target = 0; { @@ -2388,11 +2388,11 @@ void GPU_SyncQueue(GPU_QueueKind completion_queue_kind, GPU_QueueKind waiter_que } } -void GPU_SyncOtherQueues(GPU_QueueKind completion_queue_kind) +void G_SyncOtherQueues(G_QueueKind completion_queue_kind) { - if (GPU_IsMultiQueueEnabled) + if (G_IsMultiQueueEnabled) { - GPU_D12_Queue *completion_queue = GPU_D12_QueueFromKind(completion_queue_kind); + G_D12_Queue *completion_queue = G_D12_QueueFromKind(completion_queue_kind); ID3D12Fence *d3d_fence = completion_queue->commit_fence; u64 fence_target = 0; { @@ -2402,11 +2402,11 @@ void GPU_SyncOtherQueues(GPU_QueueKind completion_queue_kind) } if (ID3D12Fence_GetCompletedValue(d3d_fence) < fence_target) { - for (GPU_QueueKind waiter_queue_kind = 0; waiter_queue_kind < GPU_NumQueues; ++waiter_queue_kind) + for (G_QueueKind waiter_queue_kind = 0; waiter_queue_kind < G_NumQueues; ++waiter_queue_kind) { if (waiter_queue_kind != completion_queue_kind) { - GPU_D12_Queue *waiter_queue = 
GPU_D12_QueueFromKind(waiter_queue_kind); + G_D12_Queue *waiter_queue = G_D12_QueueFromKind(waiter_queue_kind); ID3D12CommandQueue_Wait(waiter_queue->d3d_queue, d3d_fence, fence_target); } } @@ -2417,11 +2417,11 @@ void GPU_SyncOtherQueues(GPU_QueueKind completion_queue_kind) //////////////////////////////////////////////////////////// //~ @hookimpl Map -// GPU_Mapped GPU_Map(GPU_Resource *gpu_r) +// G_Mapped G_Map(G_Resource *gpu_r) // { -// GPU_Mapped result = ZI; +// G_Mapped result = ZI; // result.resource = gpu_r; -// GPU_D12_Resource *r = (GPU_D12_Resource *)gpu_r; +// G_D12_Resource *r = (G_D12_Resource *)gpu_r; // D3D12_RANGE read_range = ZI; // HRESULT hr = ID3D12Resource_Map(r->d3d_resource, 0, &read_range, &result.mem); // if (FAILED(hr) || !result.mem) @@ -2432,18 +2432,18 @@ void GPU_SyncOtherQueues(GPU_QueueKind completion_queue_kind) // return result; // } -// void GPU_Unmap(GPU_Mapped m) +// void G_Unmap(G_Mapped m) // { -// GPU_D12_Resource *r = (GPU_D12_Resource *)m.resource; +// G_D12_Resource *r = (G_D12_Resource *)m.resource; // ID3D12Resource_Unmap(r->d3d_resource, 0, 0); // } -// void GPU_CopyBytesToFootprint(void *dst, void *src, GPU_Resource *footprint_reference) +// void G_CopyBytesToFootprint(void *dst, void *src, G_Resource *footprint_reference) // { -// GPU_D12_SharedState *g = &GPU_D12_shared_state; +// G_D12_SharedState *g = &G_D12_shared_state; // D3D12_RESOURCE_DESC desc = ZI; -// ID3D12Resource_GetDesc(((GPU_D12_Resource *)footprint_reference)->d3d_resource, &desc); +// ID3D12Resource_GetDesc(((G_D12_Resource *)footprint_reference)->d3d_resource, &desc); // u64 upload_size = 0; // u64 upload_row_size = 0; @@ -2476,10 +2476,10 @@ void GPU_SyncOtherQueues(GPU_QueueKind completion_queue_kind) //////////////////////////////////////////////////////////// //~ @hookimpl Statistics -GPU_Stats GPU_QueryStats(void) +G_Stats G_QueryStats(void) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_Stats result = ZI; + G_D12_SharedState *g 
= &G_D12_shared_state; + G_Stats result = ZI; { DXGI_QUERY_VIDEO_MEMORY_INFO info = ZI; IDXGIAdapter3_QueryVideoMemoryInfo(g->adapter, 0, DXGI_MEMORY_SEGMENT_GROUP_LOCAL, &info); @@ -2500,28 +2500,28 @@ GPU_Stats GPU_QueryStats(void) //////////////////////////////////////////////////////////// //~ @hookimpl Swapchain -GPU_SwapchainHandle GPU_AcquireSwapchain(u64 os_window_handle) +G_SwapchainHandle G_AcquireSwapchain(u64 os_window_handle) { - GPU_D12_Swapchain *swapchain = 0; + G_D12_Swapchain *swapchain = 0; { Arena *perm = PermArena(); - swapchain = PushStruct(perm, GPU_D12_Swapchain); + swapchain = PushStruct(perm, G_D12_Swapchain); } swapchain->window_hwnd = (HWND)os_window_handle; - return GPU_D12_MakeHandle(GPU_SwapchainHandle, swapchain); + return G_D12_MakeHandle(G_SwapchainHandle, swapchain); } -void GPU_ReleaseSwapchain(GPU_SwapchainHandle swapchain_handle) +void G_ReleaseSwapchain(G_SwapchainHandle swapchain_handle) { /* TODO */ } -GPU_ResourceHandle GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, GPU_Format format, Vec2I32 size) +G_ResourceHandle G_PrepareBackbuffer(G_SwapchainHandle swapchain_handle, G_Format format, Vec2I32 size) { - GPU_D12_SharedState *g = &GPU_D12_shared_state; - GPU_D12_Swapchain *swapchain = GPU_D12_SwapchainFromHandle(swapchain_handle); + G_D12_SharedState *g = &G_D12_shared_state; + G_D12_Swapchain *swapchain = G_D12_SwapchainFromHandle(swapchain_handle); size = VEC2I32(MaxI32(size.x, 1), MaxI32(size.y, 1)); - GPU_D12_Queue *direct_queue = GPU_D12_QueueFromKind(GPU_QueueKind_Direct); + G_D12_Queue *direct_queue = G_D12_QueueFromKind(G_QueueKind_Direct); /* Initialize swapchain */ if (!swapchain->d3d_swapchain) @@ -2537,15 +2537,15 @@ GPU_ResourceHandle GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, G if (SUCCEEDED(hr)) { DXGI_SWAP_CHAIN_DESC1 desc = ZI; - desc.Format = GPU_D12_DxgiFormatFromGpuFormat(format); + desc.Format = G_D12_DxgiFormatFromGpuFormat(format); desc.Width = size.x; desc.Height = 
size.y; desc.SampleDesc.Count = 1; desc.SampleDesc.Quality = 0; desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; - desc.BufferCount = GPU_D12_SwapchainBufferCount; + desc.BufferCount = G_D12_SwapchainBufferCount; desc.Scaling = DXGI_SCALING_NONE; - desc.Flags = GPU_D12_SwapchainFlags; + desc.Flags = G_D12_SwapchainFlags; desc.AlphaMode = DXGI_ALPHA_MODE_IGNORE; desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_DISCARD; hr = IDXGIFactory2_CreateSwapChainForHwnd(g->factory, (IUnknown *)direct_queue->d3d_queue, swapchain->window_hwnd, &desc, 0, 0, &swapchain1); @@ -2568,9 +2568,9 @@ GPU_ResourceHandle GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, G /* Create waitable object */ { HANDLE waitable = 0; - if (SUCCEEDED(hr) && GPU_D12_FrameLatency > 0) + if (SUCCEEDED(hr) && G_D12_FrameLatency > 0) { - hr = IDXGISwapChain3_SetMaximumFrameLatency(swapchain->d3d_swapchain, GPU_D12_FrameLatency); + hr = IDXGISwapChain3_SetMaximumFrameLatency(swapchain->d3d_swapchain, G_D12_FrameLatency); waitable = IDXGISwapChain2_GetFrameLatencyWaitableObject(swapchain->d3d_swapchain); } swapchain->waitable = waitable; @@ -2612,7 +2612,7 @@ GPU_ResourceHandle GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, G /* Release backbuffers */ for (u32 i = 0; i < countof(swapchain->backbuffers); ++i) { - GPU_D12_Resource *backbuffer = &swapchain->backbuffers[i]; + G_D12_Resource *backbuffer = &swapchain->backbuffers[i]; if (backbuffer->d3d_resource) { ID3D12Resource_Release(backbuffer->d3d_resource); @@ -2621,7 +2621,7 @@ GPU_ResourceHandle GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, G } /* Resize buffers */ - hr = IDXGISwapChain_ResizeBuffers(swapchain->d3d_swapchain, 0, size.x, size.y, DXGI_FORMAT_UNKNOWN, GPU_D12_SwapchainFlags); + hr = IDXGISwapChain_ResizeBuffers(swapchain->d3d_swapchain, 0, size.x, size.y, DXGI_FORMAT_UNKNOWN, G_D12_SwapchainFlags); if (FAILED(hr)) { /* TODO: Don't panic */ @@ -2633,7 +2633,7 @@ GPU_ResourceHandle 
GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, G { for (u32 i = 0; i < countof(swapchain->backbuffers); ++i) { - GPU_D12_Resource *backbuffer = &swapchain->backbuffers[i]; + G_D12_Resource *backbuffer = &swapchain->backbuffers[i]; if (!backbuffer->d3d_resource) { ID3D12Resource *d3d_resource = 0; @@ -2646,7 +2646,7 @@ GPU_ResourceHandle GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, G ZeroStruct(backbuffer); backbuffer->d3d_resource = d3d_resource; backbuffer->uid = Atomic64FetchAdd(&g->resource_creation_gen.v, 1) + 1; - backbuffer->flags = GPU_ResourceFlag_AllowRenderTarget; + backbuffer->flags = G_ResourceFlag_AllowRenderTarget; backbuffer->is_texture = 1; backbuffer->texture_format = format; @@ -2672,23 +2672,23 @@ GPU_ResourceHandle GPU_PrepareBackbuffer(GPU_SwapchainHandle swapchain_handle, G } /* Grab current backbuffer */ - GPU_D12_Resource *cur_backbuffer = 0; + G_D12_Resource *cur_backbuffer = 0; { u32 backbuffer_idx = IDXGISwapChain3_GetCurrentBackBufferIndex(swapchain->d3d_swapchain); cur_backbuffer = &swapchain->backbuffers[backbuffer_idx]; } - return GPU_D12_MakeHandle(GPU_ResourceHandle, cur_backbuffer); + return G_D12_MakeHandle(G_ResourceHandle, cur_backbuffer); } -void GPU_CommitBackbuffer(GPU_ResourceHandle backbuffer_handle, i32 vsync) +void G_CommitBackbuffer(G_ResourceHandle backbuffer_handle, i32 vsync) { - GPU_D12_Resource *backbuffer = GPU_D12_ResourceFromHandle(backbuffer_handle); - GPU_D12_Swapchain *swapchain = backbuffer->swapchain; - GPU_D12_Queue *direct_queue = GPU_D12_QueueFromKind(GPU_QueueKind_Direct); + G_D12_Resource *backbuffer = G_D12_ResourceFromHandle(backbuffer_handle); + G_D12_Swapchain *swapchain = backbuffer->swapchain; + G_D12_Queue *direct_queue = G_D12_QueueFromKind(G_QueueKind_Direct); u32 present_flags = 0; - if (GPU_D12_TearingIsAllowed && vsync == 0) + if (G_D12_TearingIsAllowed && vsync == 0) { present_flags |= DXGI_PRESENT_ALLOW_TEARING; } @@ -2718,10 +2718,10 @@ void 
GPU_CommitBackbuffer(GPU_ResourceHandle backbuffer_handle, i32 vsync) //////////////////////////////////////////////////////////// //~ Workers -void GPU_D12_WorkerEntry(WaveLaneCtx *lane) +void G_D12_WorkerEntry(WaveLaneCtx *lane) { - GPU_QueueKind queue_kind = (GPU_QueueKind)lane->wave->udata; - GPU_D12_Queue *queue = GPU_D12_QueueFromKind(queue_kind); + G_QueueKind queue_kind = (G_QueueKind)lane->wave->udata; + G_D12_Queue *queue = G_D12_QueueFromKind(queue_kind); // for (;;) // { diff --git a/src/gpu/gpu_dx12/gpu_dx12_core.h b/src/gpu/gpu_dx12/gpu_dx12_core.h index bceacf9a..036b477e 100644 --- a/src/gpu/gpu_dx12/gpu_dx12_core.h +++ b/src/gpu/gpu_dx12/gpu_dx12_core.h @@ -13,61 +13,61 @@ //////////////////////////////////////////////////////////// //~ Tweakable defines -#define GPU_D12_TearingIsAllowed 1 -#define GPU_D12_FrameLatency 1 -#define GPU_D12_SwapchainBufferCount 3 -#define GPU_D12_SwapchainFlags (((GPU_D12_TearingIsAllowed != 0) * DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) \ - | ((GPU_D12_FrameLatency != 0) * DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT)) +#define G_D12_TearingIsAllowed 1 +#define G_D12_FrameLatency 1 +#define G_D12_SwapchainBufferCount 3 +#define G_D12_SwapchainFlags (((G_D12_TearingIsAllowed != 0) * DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) \ + | ((G_D12_FrameLatency != 0) * DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT)) -#define GPU_D12_MaxCbvSrvUavDescriptors (1024 * 128) -#define GPU_D12_MaxSamplerDescriptors (1024 * 1) -#define GPU_D12_MaxRtvDescriptors (1024 * 64) +#define G_D12_MaxCbvSrvUavDescriptors (1024 * 128) +#define G_D12_MaxSamplerDescriptors (1024 * 1) +#define G_D12_MaxRtvDescriptors (1024 * 64) //////////////////////////////////////////////////////////// //~ Pipeline types -Struct(GPU_D12_PipelineDesc) +Struct(G_D12_PipelineDesc) { VertexShader vs; PixelShader ps; ComputeShader cs; b32 is_wireframe; D3D12_PRIMITIVE_TOPOLOGY_TYPE topology_type; - GPU_Format render_target_formats[GPU_MaxRenderTargets]; + G_Format 
render_target_formats[G_MaxRenderTargets]; }; -Struct(GPU_D12_Pipeline) +Struct(G_D12_Pipeline) { - GPU_D12_Pipeline *next_in_bin; + G_D12_Pipeline *next_in_bin; u64 hash; - GPU_D12_PipelineDesc desc; + G_D12_PipelineDesc desc; ID3D12PipelineState *pso; b32 ok; String error; }; -Struct(GPU_D12_PipelineBin) +Struct(G_D12_PipelineBin) { Mutex mutex; - GPU_D12_Pipeline *first; + G_D12_Pipeline *first; }; //////////////////////////////////////////////////////////// //~ Descriptor types -Enum(GPU_D12_DescriptorHeapKind) +Enum(G_D12_DescriptorHeapKind) { - GPU_D12_DescriptorHeapKind_CbvSrvUav, - GPU_D12_DescriptorHeapKind_Rtv, - GPU_D12_DescriptorHeapKind_Sampler, + G_D12_DescriptorHeapKind_CbvSrvUav, + G_D12_DescriptorHeapKind_Rtv, + G_D12_DescriptorHeapKind_Sampler, - GPU_D12_DescriptorHeapKind_Count + G_D12_DescriptorHeapKind_Count }; -Struct(GPU_D12_DescriptorHeap) +Struct(G_D12_DescriptorHeap) { Arena *descriptors_arena; @@ -77,34 +77,34 @@ Struct(GPU_D12_DescriptorHeap) D3D12_CPU_DESCRIPTOR_HANDLE start_handle; Mutex mutex; - struct GPU_D12_Descriptor *first_free; + struct G_D12_Descriptor *first_free; u32 max_count; }; -Struct(GPU_D12_Descriptor) +Struct(G_D12_Descriptor) { - GPU_D12_Descriptor *next; - GPU_D12_Descriptor *prev; + G_D12_Descriptor *next; + G_D12_Descriptor *prev; u64 queue_commit_target; - GPU_D12_DescriptorHeap *heap; + G_D12_DescriptorHeap *heap; D3D12_CPU_DESCRIPTOR_HANDLE handle; u32 index; }; -Struct(GPU_D12_DescriptorList) +Struct(G_D12_DescriptorList) { - GPU_D12_Descriptor *first; - GPU_D12_Descriptor *last; + G_D12_Descriptor *first; + G_D12_Descriptor *last; }; //////////////////////////////////////////////////////////// //~ Arena types -Struct(GPU_D12_Arena) +Struct(G_D12_Arena) { Arena *arena; - GPU_D12_DescriptorList committed_descriptors_by_heap_and_queue[GPU_D12_DescriptorHeapKind_Count][GPU_NumQueues]; + G_D12_DescriptorList committed_descriptors_by_heap_and_queue[G_D12_DescriptorHeapKind_Count][G_NumQueues]; /* TODO: * To support 
D3D12_RESOURCE_HEAP_TIER_1 devices, create separate heaps for: @@ -120,13 +120,13 @@ Struct(GPU_D12_Arena) //////////////////////////////////////////////////////////// //~ Resource types -Struct(GPU_D12_Resource) +Struct(G_D12_Resource) { - GPU_D12_Resource *next_free; + G_D12_Resource *next_free; ID3D12Resource *d3d_resource; u64 uid; - GPU_ResourceFlag flags; + G_ResourceFlag flags; /* Buffer info */ u64 buffer_size; @@ -135,41 +135,41 @@ Struct(GPU_D12_Resource) /* Texture info */ b32 is_texture; - GPU_Format texture_format; + G_Format texture_format; Vec3I32 texture_dims; i32 texture_mip_levels; D3D12_BARRIER_LAYOUT texture_layout; /* Sampler info */ - GPU_SamplerResourceDesc sampler_desc; + G_SamplerResourceDesc sampler_desc; /* Backbuffer info */ - struct GPU_D12_Swapchain *swapchain; + struct G_D12_Swapchain *swapchain; }; //////////////////////////////////////////////////////////// //~ Staging types -Struct(GPU_D12_StagingHeap) +Struct(G_D12_StagingHeap) { Arena *arena; - GPU_D12_Resource resource; + G_D12_Resource resource; void *mapped; u64 size; - struct GPU_D12_StagingRegionNode *head_region_node; - struct GPU_D12_StagingRegionNode *first_free_region_node; + struct G_D12_StagingRegionNode *head_region_node; + struct G_D12_StagingRegionNode *first_free_region_node; }; -Struct(GPU_D12_StagingRegionNode) +Struct(G_D12_StagingRegionNode) { - GPU_D12_StagingHeap *heap; + G_D12_StagingHeap *heap; /* Heap links (requires heap lock to read) */ - GPU_D12_StagingRegionNode *prev; - GPU_D12_StagingRegionNode *next; + G_D12_StagingRegionNode *prev; + G_D12_StagingRegionNode *next; /* Region info */ Atomic64 completion_target; @@ -179,28 +179,28 @@ Struct(GPU_D12_StagingRegionNode) //////////////////////////////////////////////////////////// //~ Command queue types -Struct(GPU_D12_CommandQueueDesc) +Struct(G_D12_CommandQueueDesc) { D3D12_COMMAND_LIST_TYPE type; D3D12_COMMAND_QUEUE_PRIORITY priority; }; -Struct(GPU_D12_Queue) +Struct(G_D12_Queue) { ID3D12CommandQueue 
*d3d_queue; - GPU_D12_CommandQueueDesc desc; + G_D12_CommandQueueDesc desc; Mutex commit_mutex; ID3D12Fence *commit_fence; u64 commit_fence_target; /* Raw command lists */ - struct GPU_D12_RawCommandList *first_committed_cl; - struct GPU_D12_RawCommandList *last_committed_cl; + struct G_D12_RawCommandList *first_committed_cl; + struct G_D12_RawCommandList *last_committed_cl; /* Staging heap */ Mutex staging_mutex; - GPU_D12_StagingHeap *staging_heap; + G_D12_StagingHeap *staging_heap; Fence sync_fence; }; @@ -208,10 +208,10 @@ Struct(GPU_D12_Queue) //////////////////////////////////////////////////////////// //~ Raw command list types -Struct(GPU_D12_RawCommandList) +Struct(G_D12_RawCommandList) { - GPU_D12_Queue *queue; - GPU_D12_RawCommandList *next; + G_D12_Queue *queue; + G_D12_RawCommandList *next; u64 commit_fence_target; @@ -219,30 +219,30 @@ Struct(GPU_D12_RawCommandList) ID3D12GraphicsCommandList7 *d3d_cl; /* Direct queue command lists keep a constant list of CPU-only descriptors */ - GPU_D12_Descriptor *rtv_descriptors[GPU_MaxRenderTargets]; - GPU_D12_Descriptor *rtv_clear_descriptor; + G_D12_Descriptor *rtv_descriptors[G_MaxRenderTargets]; + G_D12_Descriptor *rtv_clear_descriptor; }; //////////////////////////////////////////////////////////// //~ Command list types -#define GPU_D12_CmdsPerChunk 256 +#define G_D12_CmdsPerChunk 256 -Enum(GPU_D12_CmdKind) +Enum(G_D12_CmdKind) { - GPU_D12_CmdKind_None, - GPU_D12_CmdKind_Barrier, - GPU_D12_CmdKind_Constant, - GPU_D12_CmdKind_CopyBytes, - GPU_D12_CmdKind_CopyTexels, - GPU_D12_CmdKind_Compute, - GPU_D12_CmdKind_Rasterize, - GPU_D12_CmdKind_ClearRtv, + G_D12_CmdKind_None, + G_D12_CmdKind_Barrier, + G_D12_CmdKind_Constant, + G_D12_CmdKind_CopyBytes, + G_D12_CmdKind_CopyTexels, + G_D12_CmdKind_Compute, + G_D12_CmdKind_Rasterize, + G_D12_CmdKind_ClearRtv, }; -Struct(GPU_D12_Cmd) +Struct(G_D12_Cmd) { - GPU_D12_CmdKind kind; + G_D12_CmdKind kind; b32 skip; union { @@ -254,7 +254,7 @@ Struct(GPU_D12_Cmd) struct { - 
GPU_BarrierDesc desc; + G_BarrierDesc desc; /* Post-batch data */ b32 is_end_of_batch; @@ -263,16 +263,16 @@ Struct(GPU_D12_Cmd) struct { - GPU_D12_Resource *dst; - GPU_D12_Resource *src; + G_D12_Resource *dst; + G_D12_Resource *src; u64 dst_offset; RngU64 src_copy_range; } copy_bytes; struct { - GPU_D12_Resource *dst; - GPU_D12_Resource *src; + G_D12_Resource *dst; + G_D12_Resource *src; D3D12_TEXTURE_COPY_LOCATION dst_loc; D3D12_TEXTURE_COPY_LOCATION src_loc; Vec3I32 dst_offset; @@ -290,36 +290,36 @@ Struct(GPU_D12_Cmd) VertexShader vs; PixelShader ps; u32 instances_count; - GPU_IndexBufferDesc index_buffer_desc; - GPU_D12_Resource *render_targets[GPU_MaxRenderTargets]; + G_IndexBufferDesc index_buffer_desc; + G_D12_Resource *render_targets[G_MaxRenderTargets]; Rng3 viewport; Rng2 scissor; - GPU_RasterMode mode; + G_RasterMode mode; } rasterize; struct { - GPU_D12_Resource *render_target; + G_D12_Resource *render_target; Vec4 color; } clear_rtv; }; }; -Struct(GPU_D12_CmdChunk) +Struct(G_D12_CmdChunk) { - GPU_D12_CmdChunk *next; - struct GPU_D12_CmdList *cl; - GPU_D12_Cmd *cmds; + G_D12_CmdChunk *next; + struct G_D12_CmdList *cl; + G_D12_Cmd *cmds; u64 cmds_count; }; -Struct(GPU_D12_CmdList) +Struct(G_D12_CmdList) { - GPU_QueueKind queue_kind; - GPU_D12_CmdList *next; + G_QueueKind queue_kind; + G_D12_CmdList *next; - GPU_D12_CmdChunk *first_cmd_chunk; - GPU_D12_CmdChunk *last_cmd_chunk; + G_D12_CmdChunk *first_cmd_chunk; + G_D12_CmdChunk *last_cmd_chunk; u64 chunks_count; u64 cmds_count; }; @@ -327,7 +327,7 @@ Struct(GPU_D12_CmdList) //////////////////////////////////////////////////////////// //~ Swapchain types -Struct(GPU_D12_Swapchain) +Struct(G_D12_Swapchain) { IDXGISwapChain3 *d3d_swapchain; @@ -338,15 +338,15 @@ Struct(GPU_D12_Swapchain) ID3D12Fence *present_fence; u64 present_fence_target; - GPU_Format backbuffers_format; + G_Format backbuffers_format; Vec2I32 backbuffers_resolution; - GPU_D12_Resource backbuffers[GPU_D12_SwapchainBufferCount]; + 
G_D12_Resource backbuffers[G_D12_SwapchainBufferCount]; }; //////////////////////////////////////////////////////////// //~ State types -Struct(GPU_D12_SharedState) +Struct(G_D12_SharedState) { Atomic64Padded resource_creation_gen; @@ -355,80 +355,80 @@ Struct(GPU_D12_SharedState) Atomic64 driver_descriptors_allocated; /* Queues */ - GPU_D12_Queue queues[GPU_NumQueues]; + G_D12_Queue queues[G_NumQueues]; /* Descriptor heaps */ - GPU_D12_DescriptorHeap descriptor_heaps[GPU_D12_DescriptorHeapKind_Count]; + G_D12_DescriptorHeap descriptor_heaps[G_D12_DescriptorHeapKind_Count]; /* Rootsig */ ID3D12RootSignature *bindless_rootsig; /* Pipelines */ - GPU_D12_PipelineBin pipeline_bins[1024]; + G_D12_PipelineBin pipeline_bins[1024]; /* Command lists */ Mutex free_cmd_lists_mutex; - GPU_D12_CmdList *first_free_cmd_list; + G_D12_CmdList *first_free_cmd_list; /* Command chunks */ Mutex free_cmd_chunks_mutex; - GPU_D12_CmdChunk *first_free_cmd_chunk; + G_D12_CmdChunk *first_free_cmd_chunk; /* Swapchains */ Mutex free_swapchains_mutex; - GPU_D12_Swapchain *first_free_swapchain; + G_D12_Swapchain *first_free_swapchain; /* Device */ IDXGIFactory6 *factory; IDXGIAdapter3 *adapter; ID3D12Device10 *device; -} extern GPU_D12_shared_state; +} extern G_D12_shared_state; //////////////////////////////////////////////////////////// //~ Helpers -#define GPU_D12_MakeHandle(type, ptr) (type) { .v = (u64)(ptr) } +#define G_D12_MakeHandle(type, ptr) (type) { .v = (u64)(ptr) } -GPU_D12_Arena *GPU_D12_ArenaFromHandle(GPU_ArenaHandle handle); -GPU_D12_CmdList *GPU_D12_CmdListFromHandle(GPU_CommandListHandle handle); -GPU_D12_Resource *GPU_D12_ResourceFromHandle(GPU_ResourceHandle handle); -GPU_D12_Swapchain *GPU_D12_SwapchainFromHandle(GPU_SwapchainHandle handle); +G_D12_Arena *G_D12_ArenaFromHandle(G_ArenaHandle handle); +G_D12_CmdList *G_D12_CmdListFromHandle(G_CommandListHandle handle); +G_D12_Resource *G_D12_ResourceFromHandle(G_ResourceHandle handle); +G_D12_Swapchain 
*G_D12_SwapchainFromHandle(G_SwapchainHandle handle); -DXGI_FORMAT GPU_D12_DxgiFormatFromGpuFormat(GPU_Format format); -D3D12_BARRIER_SYNC GPU_D12_BarrierSyncFromStages(GPU_Stage stages); -D3D12_BARRIER_ACCESS GPU_D12_BarrierAccessFromAccesses(GPU_Access accesses); -D3D12_BARRIER_LAYOUT GPU_D12_BarrierLayoutFromLayout(GPU_Layout layout); +DXGI_FORMAT G_D12_DxgiFormatFromGpuFormat(G_Format format); +D3D12_BARRIER_SYNC G_D12_BarrierSyncFromStages(G_Stage stages); +D3D12_BARRIER_ACCESS G_D12_BarrierAccessFromAccesses(G_Access accesses); +D3D12_BARRIER_LAYOUT G_D12_BarrierLayoutFromLayout(G_Layout layout); //////////////////////////////////////////////////////////// //~ Pipeline -GPU_D12_Pipeline *GPU_D12_PipelineFromDesc(GPU_D12_PipelineDesc desc); +G_D12_Pipeline *G_D12_PipelineFromDesc(G_D12_PipelineDesc desc); //////////////////////////////////////////////////////////// //~ Queue -GPU_D12_Queue *GPU_D12_QueueFromKind(GPU_QueueKind kind); +G_D12_Queue *G_D12_QueueFromKind(G_QueueKind kind); //////////////////////////////////////////////////////////// //~ Resource helpers -GPU_D12_Descriptor *GPU_D12_DescriptorFromIndex(GPU_D12_DescriptorHeapKind heap_kind, u32 index); -GPU_D12_Descriptor *GPU_D12_PushDescriptor(GPU_D12_Arena *gpu_arena, GPU_D12_DescriptorHeapKind heap_kind, u32 forced); +G_D12_Descriptor *G_D12_DescriptorFromIndex(G_D12_DescriptorHeapKind heap_kind, u32 index); +G_D12_Descriptor *G_D12_PushDescriptor(G_D12_Arena *gpu_arena, G_D12_DescriptorHeapKind heap_kind, u32 forced); //////////////////////////////////////////////////////////// //~ Raw command list -GPU_D12_RawCommandList *GPU_D12_PrepareRawCommandList(GPU_QueueKind queue_kind); -void GPU_D12_CommitRawCommandList(GPU_D12_RawCommandList *cl); +G_D12_RawCommandList *G_D12_PrepareRawCommandList(G_QueueKind queue_kind); +void G_D12_CommitRawCommandList(G_D12_RawCommandList *cl); //////////////////////////////////////////////////////////// //~ Command helpers -GPU_D12_Cmd 
*GPU_D12_PushCmd(GPU_D12_CmdList *cl); -GPU_D12_Cmd *GPU_D12_PushConstCmd(GPU_D12_CmdList *cl, i32 slot, void *v); -GPU_D12_StagingRegionNode *GPU_D12_PushStagingRegion(GPU_D12_CmdList *cl, u64 size); +G_D12_Cmd *G_D12_PushCmd(G_D12_CmdList *cl); +G_D12_Cmd *G_D12_PushConstCmd(G_D12_CmdList *cl, i32 slot, void *v); +G_D12_StagingRegionNode *G_D12_PushStagingRegion(G_D12_CmdList *cl, u64 size); //////////////////////////////////////////////////////////// //~ Workers -void GPU_D12_WorkerEntry(WaveLaneCtx *lane); +void G_D12_WorkerEntry(WaveLaneCtx *lane); diff --git a/src/gpu/gpu_dx12/gpu_dx12_shader_core.cgh b/src/gpu/gpu_dx12/gpu_dx12_shader_core.cgh index 8734436a..1ccb4c61 100644 --- a/src/gpu/gpu_dx12/gpu_dx12_shader_core.cgh +++ b/src/gpu/gpu_dx12/gpu_dx12_shader_core.cgh @@ -7,8 +7,8 @@ /* Slots below assume they won't overlap user defined constants */ StaticAssert(NumGeneralPurposeShaderConstants == 8); -ForceShaderConstant(RWByteAddressBufferRef, GPU_D12_DebugPrintBuff, 8); -#define GPU_D12_NumShaderConstants (9) +ForceShaderConstant(RWByteAddressBufferRef, G_D12_DebugPrintBuff, 8); +#define G_D12_NumShaderConstants (9) //////////////////////////////////////////////////////////// //~ @hookimpl Shader printf @@ -16,12 +16,12 @@ ForceShaderConstant(RWByteAddressBufferRef, GPU_D12_DebugPrintBuff, 8); #if IsLanguageG /* This technique comes from MJP's article: https://therealmjp.github.io/posts/hlsl-printf/ */ - #if GPU_DEBUG - #define DebugPrintImpl_(fmt_cstr) do { \ + #if G_DEBUG + #define G_DebugPrintImpl_(fmt_cstr) do { \ u32 __strlen = 0; \ for (;;) { if (U32FromChar(fmt_cstr[__strlen]) == 0) { break; } ++__strlen; } \ RWByteAddressBuffer __print_buff; \ - __print_buff = RWByteAddressBufferFromRef(GPU_D12_DebugPrintBuff); \ + __print_buff = RWByteAddressBufferFromRef(G_D12_DebugPrintBuff); \ u32 __pos; \ __print_buff.InterlockedAdd(0, __strlen, __pos); \ if (__pos < countof(__print_buff)) \ @@ -33,7 +33,7 @@ ForceShaderConstant(RWByteAddressBufferRef, 
GPU_D12_DebugPrintBuff, 8); } \ } while (0) #else - #define DebugPrintImpl_(fmt_cstr) + #define G_DebugPrintImpl_(fmt_cstr) #endif #endif diff --git a/src/gpu/gpu_shader_core.cgh b/src/gpu/gpu_shader_core.cgh index 5b9a273c..0863d722 100644 --- a/src/gpu/gpu_shader_core.cgh +++ b/src/gpu/gpu_shader_core.cgh @@ -2,13 +2,13 @@ //~ Shared static handles (common resources available to all shaders) #if IsLanguageC - #define GPU_SharedRef(type, v) ((type) { (v) }) + #define G_SharedRef(type, v) ((type) { (v) }) #elif IsLanguageG - #define GPU_SharedRef(type, v) (type(v)) + #define G_SharedRef(type, v) (type(v)) #endif -#define GPU_BasicPointSampler GPU_SharedRef(SamplerStateRef, 1) -#define GPU_BasicNoiseTexture GPU_SharedRef(Texture3DRef, 2) +#define G_BasicPointSampler G_SharedRef(SamplerStateRef, 1) +#define G_BasicNoiseTexture G_SharedRef(Texture3DRef, 2) //////////////////////////////////////////////////////////// //~ @hookdecl Shader printf @@ -16,6 +16,6 @@ #if IsLanguageG /* Implemented per graphics platform layer */ - #define DebugPrint(msg) DebugPrintImpl_(msg) + #define G_DebugPrint(msg) G_DebugPrintImpl_(msg) #endif diff --git a/src/pp/pp_vis/pp_vis_core.c b/src/pp/pp_vis/pp_vis_core.c index 39e92488..67daf108 100644 --- a/src/pp/pp_vis/pp_vis_core.c +++ b/src/pp/pp_vis/pp_vis_core.c @@ -35,7 +35,7 @@ void V_TickForever(WaveLaneCtx *lane) frame->arena = AcquireArena(Gibi(64)); frame->dverts_arena = AcquireArena(Gibi(64)); frame->dvert_idxs_arena = AcquireArena(Gibi(64)); - frame->gpu_arena = GPU_AcquireArena(); + frame->gpu_arena = G_AcquireArena(); } ////////////////////////////// @@ -125,18 +125,18 @@ void V_TickForever(WaveLaneCtx *lane) Arena *old_arena = frame->arena; Arena *old_dverts_arena = frame->dverts_arena; Arena *old_dvert_idxs_arena = frame->dvert_idxs_arena; - GPU_ArenaHandle old_gpu_arena = frame->gpu_arena; + G_ArenaHandle old_gpu_arena = frame->gpu_arena; ZeroStruct(frame); frame->arena = old_arena; frame->dverts_arena = old_dverts_arena; 
frame->dvert_idxs_arena = old_dvert_idxs_arena; frame->gpu_arena = old_gpu_arena; } - frame->cl = GPU_PrepareCommandList(GPU_QueueKind_Direct); + frame->cl = G_PrepareCommandList(G_QueueKind_Direct); ResetArena(frame->arena); ResetArena(frame->dverts_arena); ResetArena(frame->dvert_idxs_arena); - GPU_ResetArena(frame->cl, frame->gpu_arena); + G_ResetArena(frame->cl, frame->gpu_arena); frame->time_ns = TimeNs(); frame->tick = last_frame->tick + 1; @@ -561,21 +561,21 @@ void V_TickForever(WaveLaneCtx *lane) //- Push data to GPU /* Target */ - GPU_ResourceHandle draw_target = GPU_PushTexture2D( + G_ResourceHandle draw_target = G_PushTexture2D( frame->gpu_arena, - GPU_Format_R16G16B16A16_Float, + G_Format_R16G16B16A16_Float, window_frame.monitor_size, - GPU_Layout_DirectQueue_ShaderReadWrite, - .flags = GPU_ResourceFlag_AllowShaderReadWrite | GPU_ResourceFlag_AllowRenderTarget + G_Layout_DirectQueue_ShaderReadWrite, + .flags = G_ResourceFlag_AllowShaderReadWrite | G_ResourceFlag_AllowRenderTarget ); - Texture2DRef draw_target_ro = GPU_PushTexture2DRef(frame->gpu_arena, draw_target); - RWTexture2DRef draw_target_rw = GPU_PushRWTexture2DRef(frame->gpu_arena, draw_target); + Texture2DRef draw_target_ro = G_PushTexture2DRef(frame->gpu_arena, draw_target); + RWTexture2DRef draw_target_rw = G_PushRWTexture2DRef(frame->gpu_arena, draw_target); /* Verts */ - GPU_ResourceHandle dverts_buff = GPU_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromArena(frame->dverts_arena)); - GPU_ResourceHandle dvert_idxs_buff = GPU_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromArena(frame->dvert_idxs_arena)); - StructuredBufferRef dverts_ro = GPU_PushStructuredBufferRef(frame->gpu_arena, dverts_buff, V_DVert); - GPU_IndexBufferDesc dvert_idxs_ib = GPU_IdxBuff32(dvert_idxs_buff); + G_ResourceHandle dverts_buff = G_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromArena(frame->dverts_arena)); + G_ResourceHandle dvert_idxs_buff = G_PushBufferFromCpu(frame->gpu_arena, 
frame->cl, StringFromArena(frame->dvert_idxs_arena)); + StructuredBufferRef dverts_ro = G_PushStructuredBufferRef(frame->gpu_arena, dverts_buff, V_DVert); + G_IndexBufferDesc dvert_idxs_ib = G_IdxBuff32(dvert_idxs_buff); /* Params */ V_DParams params = ZI; @@ -586,11 +586,11 @@ void V_TickForever(WaveLaneCtx *lane) params.shape_verts = dverts_ro; params.world_to_draw_xf = world_to_draw_xf; } - GPU_ResourceHandle params_buff = GPU_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromStruct(¶ms)); - StructuredBufferRef params_ro = GPU_PushStructuredBufferRef(frame->gpu_arena, params_buff, V_DParams); + G_ResourceHandle params_buff = G_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromStruct(¶ms)); + StructuredBufferRef params_ro = G_PushStructuredBufferRef(frame->gpu_arena, params_buff, V_DParams); /* Constants */ - GPU_SetConstant(frame->cl, V_ShaderConst_Params, params_ro); + G_SetConstant(frame->cl, V_ShaderConst_Params, params_ro); Rng3 viewport = RNG3(VEC3(0, 0, 0), VEC3(draw_size.x, draw_size.y, 1)); Rng2 scissor = RNG2(VEC2(viewport.p0.x, viewport.p0.y), VEC2(viewport.p1.x, viewport.p1.y)); @@ -601,35 +601,35 @@ void V_TickForever(WaveLaneCtx *lane) /* Backdrop pass */ { - GPU_Compute(frame->cl, V_BackdropCS, V_BackdropCSThreadSizeFromTexSize(draw_size)); + G_Compute(frame->cl, V_BackdropCS, V_BackdropCSThreadSizeFromTexSize(draw_size)); } ////////////////////////////// //- Shapes pass - GPU_DumbMemoryLayoutSync(frame->cl, draw_target, GPU_Layout_DirectQueue_RenderTargetWrite); + G_DumbMemoryLayoutSync(frame->cl, draw_target, G_Layout_DirectQueue_RenderTargetWrite); /* Shapes pass */ { - GPU_Rasterize(frame->cl, + G_Rasterize(frame->cl, V_DVertVS, V_DVertPS, 1, dvert_idxs_ib, 1, &draw_target, viewport, scissor, - GPU_RasterMode_TriangleList); + G_RasterMode_TriangleList); } ////////////////////////////// //- Finalize draw target - GPU_DumbMemoryLayoutSync(frame->cl, draw_target, GPU_Layout_DirectQueue_ShaderRead); + G_DumbMemoryLayoutSync(frame->cl, 
draw_target, G_Layout_DirectQueue_ShaderRead); UI_SetRawTexture(vis_box, draw_target_ro, VEC2(0, 0), VEC2(1, 1)); } ////////////////////////////// //- End frame - GPU_CommitCommandList(frame->cl); + G_CommitCommandList(frame->cl); UI_EndFrame(ui_frame); diff --git a/src/pp/pp_vis/pp_vis_core.h b/src/pp/pp_vis/pp_vis_core.h index afb72a2f..37cf4867 100644 --- a/src/pp/pp_vis/pp_vis_core.h +++ b/src/pp/pp_vis/pp_vis_core.h @@ -78,11 +78,11 @@ Struct(V_Frame) Arena *arena; Arena *dverts_arena; Arena *dvert_idxs_arena; - GPU_ArenaHandle gpu_arena; + G_ArenaHandle gpu_arena; i64 tick; i64 time_ns; - GPU_CommandListHandle cl; + G_CommandListHandle cl; }; Struct(V_State) diff --git a/src/proto/proto.c b/src/proto/proto.c index 86a2e33f..ab88fc1b 100644 --- a/src/proto/proto.c +++ b/src/proto/proto.c @@ -1,10 +1,10 @@ void PT_RunForever(WaveLaneCtx *lane) { - GPU_ArenaHandle gpu_frame_arena = GPU_AcquireArena(); + G_ArenaHandle gpu_frame_arena = G_AcquireArena(); for (;;) { - WND_Frame window_frame = WND_BeginFrame(GPU_Format_R16G16B16A16_Float, WND_BackbufferSizeMode_MatchWindow); + WND_Frame window_frame = WND_BeginFrame(G_Format_R16G16B16A16_Float, WND_BackbufferSizeMode_MatchWindow); for (u64 cev_idx = 0; cev_idx < window_frame.controller_events.count; ++cev_idx) { ControllerEvent *cev = &window_frame.controller_events.events[cev_idx]; @@ -17,64 +17,64 @@ void PT_RunForever(WaveLaneCtx *lane) { { - GPU_CommandListHandle cl = GPU_PrepareCommandList(GPU_QueueKind_Direct); + G_CommandListHandle cl = G_PrepareCommandList(G_QueueKind_Direct); { /* Push resources */ Vec2I32 final_target_size = window_frame.draw_size; - GPU_ResourceHandle final_target = GPU_PushTexture2D(gpu_frame_arena, - GPU_Format_R16G16B16A16_Float, + G_ResourceHandle final_target = G_PushTexture2D(gpu_frame_arena, + G_Format_R16G16B16A16_Float, final_target_size, - GPU_Layout_DirectQueue_ShaderReadWrite, - .flags = GPU_ResourceFlag_AllowShaderReadWrite); + G_Layout_DirectQueue_ShaderReadWrite, + .flags = 
G_ResourceFlag_AllowShaderReadWrite); /* Push resource handles */ - Texture2DRef final_target_rhandle = GPU_PushTexture2DRef(gpu_frame_arena, final_target); - RWTexture2DRef final_target_rwhandle = GPU_PushRWTexture2DRef(gpu_frame_arena, final_target); + Texture2DRef final_target_rhandle = G_PushTexture2DRef(gpu_frame_arena, final_target); + RWTexture2DRef final_target_rwhandle = G_PushRWTexture2DRef(gpu_frame_arena, final_target); /* Prep test pass */ { - GPU_SetConstant(cl, PT_ShaderConst_TestTargetWidth, final_target_size.x); - GPU_SetConstant(cl, PT_ShaderConst_TestTargetHeight, final_target_size.y); - GPU_SetConstant(cl, PT_ShaderConst_TestTarget, final_target_rwhandle); - GPU_SetConstant(cl, PT_ShaderConst_TestConst, 3.123); - GPU_SetConstant(cl, PT_ShaderConst_BlitSampler, GPU_BasicPointSampler); - GPU_SetConstant(cl, PT_ShaderConst_BlitSrc, final_target_rhandle); - GPU_SetConstant(cl, PT_ShaderConst_NoiseTex, GPU_BasicNoiseTexture); + G_SetConstant(cl, PT_ShaderConst_TestTargetWidth, final_target_size.x); + G_SetConstant(cl, PT_ShaderConst_TestTargetHeight, final_target_size.y); + G_SetConstant(cl, PT_ShaderConst_TestTarget, final_target_rwhandle); + G_SetConstant(cl, PT_ShaderConst_TestConst, 3.123); + G_SetConstant(cl, PT_ShaderConst_BlitSampler, G_BasicPointSampler); + G_SetConstant(cl, PT_ShaderConst_BlitSrc, final_target_rhandle); + G_SetConstant(cl, PT_ShaderConst_NoiseTex, G_BasicNoiseTexture); } /* Test pass */ { - GPU_Compute(cl, PT_TestCS, VEC3I32((final_target_size.x + 7) / 8, (final_target_size.y + 7) / 8, 1)); + G_Compute(cl, PT_TestCS, VEC3I32((final_target_size.x + 7) / 8, (final_target_size.y + 7) / 8, 1)); } - GPU_DumbMemorySync(cl, final_target); + G_DumbMemorySync(cl, final_target); /* Prep blit pass */ { - GPU_DumbMemoryLayoutSync(cl, final_target, GPU_Layout_DirectQueue_ShaderRead); - GPU_DumbMemoryLayoutSync(cl, window_frame.backbuffer, GPU_Layout_DirectQueue_RenderTargetWrite); + G_DumbMemoryLayoutSync(cl, final_target, 
G_Layout_DirectQueue_ShaderRead); + G_DumbMemoryLayoutSync(cl, window_frame.backbuffer, G_Layout_DirectQueue_RenderTargetWrite); } /* Blit pass */ { - GPU_Rasterize(cl, + G_Rasterize(cl, PT_BlitVS, PT_BlitPS, - 1, GPU_GetSharedQuadIndices(), + 1, G_GetSharedQuadIndices(), 1, &window_frame.backbuffer, - GPU_ViewportFromTexture(window_frame.backbuffer), GPU_ScissorFromTexture(window_frame.backbuffer), - GPU_RasterMode_TriangleList); + G_ViewportFromTexture(window_frame.backbuffer), G_ScissorFromTexture(window_frame.backbuffer), + G_RasterMode_TriangleList); } /* Finalize backbuffer layout */ { - GPU_DumbMemoryLayoutSync(cl, window_frame.backbuffer, GPU_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present); + G_DumbMemoryLayoutSync(cl, window_frame.backbuffer, G_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present); } /* Reset */ { - GPU_ResetArena(cl, gpu_frame_arena); + G_ResetArena(cl, gpu_frame_arena); } } - GPU_CommitCommandList(cl); + G_CommitCommandList(cl); } } WND_EndFrame(window_frame, VSYNC); diff --git a/src/ui/ui_core.c b/src/ui/ui_core.c index 26ccfafa..bb930170 100644 --- a/src/ui/ui_core.c +++ b/src/ui/ui_core.c @@ -458,7 +458,7 @@ UI_Frame *UI_BeginFrame(UI_FrameFlag frame_flags, Vec4 swapchain_color) UI_Frame *frame = &g->frames[i]; frame->arena = AcquireArena(Gibi(64)); frame->rects_arena = AcquireArena(Gibi(64)); - frame->gpu_arena = GPU_AcquireArena(); + frame->gpu_arena = G_AcquireArena(); } } @@ -479,17 +479,17 @@ UI_Frame *UI_BeginFrame(UI_FrameFlag frame_flags, Vec4 swapchain_color) { Arena *old_arena = frame->arena; Arena *old_rects_arena = frame->arena; - GPU_ArenaHandle old_gpu_arena = frame->gpu_arena; + G_ArenaHandle old_gpu_arena = frame->gpu_arena; ZeroStruct(frame); frame->arena = old_arena; frame->rects_arena = old_rects_arena; frame->gpu_arena = old_gpu_arena; } - frame->window_frame = WND_BeginFrame(GPU_Format_R16G16B16A16_Float, WND_BackbufferSizeMode_MatchMonitor); - frame->cl = GPU_PrepareCommandList(GPU_QueueKind_Direct); + 
frame->window_frame = WND_BeginFrame(G_Format_R16G16B16A16_Float, WND_BackbufferSizeMode_MatchMonitor); + frame->cl = G_PrepareCommandList(G_QueueKind_Direct); ResetArena(frame->arena); ResetArena(frame->rects_arena); - GPU_ResetArena(frame->cl, frame->gpu_arena); + G_ResetArena(frame->cl, frame->gpu_arena); { i64 now_ns = TimeNs(); @@ -1208,7 +1208,7 @@ void UI_EndFrame(UI_Frame *frame) ////////////////////////////// //- Render - GPU_ResourceHandle backbuffer = frame->window_frame.backbuffer; + G_ResourceHandle backbuffer = frame->window_frame.backbuffer; { ////////////////////////////// //- Build render data @@ -1371,18 +1371,18 @@ void UI_EndFrame(UI_Frame *frame) //- Push data to GPU /* Target */ - GPU_ResourceHandle draw_target = GPU_PushTexture2D( + G_ResourceHandle draw_target = G_PushTexture2D( frame->gpu_arena, - GPU_Format_R16G16B16A16_Float, + G_Format_R16G16B16A16_Float, monitor_size, - GPU_Layout_DirectQueue_RenderTargetWrite, - .flags = GPU_ResourceFlag_AllowRenderTarget + G_Layout_DirectQueue_RenderTargetWrite, + .flags = G_ResourceFlag_AllowRenderTarget ); - Texture2DRef draw_target_ro = GPU_PushTexture2DRef(frame->gpu_arena, draw_target); + Texture2DRef draw_target_ro = G_PushTexture2DRef(frame->gpu_arena, draw_target); /* Rects */ - GPU_ResourceHandle rects_buff = GPU_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromArena(frame->rects_arena)); - StructuredBufferRef rects_ro = GPU_PushStructuredBufferRef(frame->gpu_arena, rects_buff, UI_DRect); + G_ResourceHandle rects_buff = G_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromArena(frame->rects_arena)); + StructuredBufferRef rects_ro = G_PushStructuredBufferRef(frame->gpu_arena, rects_buff, UI_DRect); /* Params */ UI_DParams params = ZI; @@ -1391,70 +1391,70 @@ void UI_EndFrame(UI_Frame *frame) params.target_ro = draw_target_ro; params.rects = rects_ro; } - GPU_ResourceHandle params_buff = GPU_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromStruct(¶ms)); - 
StructuredBufferRef params_ro = GPU_PushStructuredBufferRef(frame->gpu_arena, params_buff, UI_DParams); + G_ResourceHandle params_buff = G_PushBufferFromCpu(frame->gpu_arena, frame->cl, StringFromStruct(¶ms)); + StructuredBufferRef params_ro = G_PushStructuredBufferRef(frame->gpu_arena, params_buff, UI_DParams); /* Constants */ - GPU_SetConstant(frame->cl, UI_ShaderConst_Params, params_ro); + G_SetConstant(frame->cl, UI_ShaderConst_Params, params_ro); ////////////////////////////// //- Dispatch shaders - GPU_DumbMemoryLayoutSync(frame->cl, draw_target, GPU_Layout_DirectQueue_RenderTargetWrite); + G_DumbMemoryLayoutSync(frame->cl, draw_target, G_Layout_DirectQueue_RenderTargetWrite); //- Clear pass { - GPU_ClearRenderTarget(frame->cl, draw_target, VEC4(1, 0, 0, 1)); + G_ClearRenderTarget(frame->cl, draw_target, VEC4(1, 0, 0, 1)); } //- Rect pass - GPU_DumbMemoryLayoutSync(frame->cl, draw_target, GPU_Layout_DirectQueue_RenderTargetWrite); + G_DumbMemoryLayoutSync(frame->cl, draw_target, G_Layout_DirectQueue_RenderTargetWrite); - if (GPU_CountBufferBytes(rects_buff) > 0) + if (G_CountBufferBytes(rects_buff) > 0) { /* Render rects */ - GPU_Rasterize(frame->cl, + G_Rasterize(frame->cl, UI_DRectVS, UI_DRectPS, - 1, GPU_GetSharedQuadIndices(), + 1, G_GetSharedQuadIndices(), 1, &draw_target, draw_viewport, draw_scissor, - GPU_RasterMode_TriangleList); + G_RasterMode_TriangleList); /* Render rect wireframes */ if (AnyBit(frame->frame_flags, UI_FrameFlag_Debug)) { - GPU_SetConstant(frame->cl, UI_ShaderConst_DebugDraw, 1); - GPU_Rasterize(frame->cl, + G_SetConstant(frame->cl, UI_ShaderConst_DebugDraw, 1); + G_Rasterize(frame->cl, UI_DRectVS, UI_DRectPS, - 1, GPU_GetSharedQuadIndices(), + 1, G_GetSharedQuadIndices(), 1, &draw_target, draw_viewport, draw_scissor, - GPU_RasterMode_WireTriangleList); + G_RasterMode_WireTriangleList); } } //- Backbuffer blit pass - GPU_DumbMemoryLayoutSync(frame->cl, draw_target, GPU_Layout_DirectQueue_ShaderRead); - 
GPU_DumbMemoryLayoutSync(frame->cl, backbuffer, GPU_Layout_DirectQueue_RenderTargetWrite); + G_DumbMemoryLayoutSync(frame->cl, draw_target, G_Layout_DirectQueue_ShaderRead); + G_DumbMemoryLayoutSync(frame->cl, backbuffer, G_Layout_DirectQueue_RenderTargetWrite); { - GPU_Rasterize(frame->cl, + G_Rasterize(frame->cl, UI_BlitVS, UI_BlitPS, - 1, GPU_GetSharedQuadIndices(), + 1, G_GetSharedQuadIndices(), 1, &backbuffer, draw_viewport, draw_scissor, - GPU_RasterMode_TriangleList); + G_RasterMode_TriangleList); } - GPU_DumbMemoryLayoutSync(frame->cl, backbuffer, GPU_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present); + G_DumbMemoryLayoutSync(frame->cl, backbuffer, G_Layout_AnyQueue_ShaderRead_CopyRead_CopyWrite_Present); } ////////////////////////////// //- End frame - GPU_CommitCommandList(frame->cl); + G_CommitCommandList(frame->cl); WND_EndFrame(frame->window_frame, VSYNC); EndScratch(scratch); diff --git a/src/ui/ui_core.h b/src/ui/ui_core.h index 7b523643..e8a7b705 100644 --- a/src/ui/ui_core.h +++ b/src/ui/ui_core.h @@ -313,11 +313,11 @@ Struct(UI_Frame) { Arena *arena; Arena *rects_arena; - GPU_ArenaHandle gpu_arena; + G_ArenaHandle gpu_arena; WND_Frame window_frame; - GPU_ResourceHandle backbuffer; - GPU_CommandListHandle cl; + G_ResourceHandle backbuffer; + G_CommandListHandle cl; u64 transient_key_seed; diff --git a/src/ui/ui_shaders.g b/src/ui/ui_shaders.g index 3623505a..d1edfca6 100644 --- a/src/ui/ui_shaders.g +++ b/src/ui/ui_shaders.g @@ -33,7 +33,7 @@ PixelShader(UI_DRectPS, UI_DRectPSOutput, UI_DRectPSInput input) { UI_DParams params = StructuredBufferFromRef(UI_ShaderConst_Params)[0]; StructuredBuffer rects = StructuredBufferFromRef(params.rects); - SamplerState sampler = SamplerStateFromRef(GPU_BasicPointSampler); + SamplerState sampler = SamplerStateFromRef(G_BasicPointSampler); UI_DRect rect = rects[input.rect_idx]; @@ -140,12 +140,12 @@ PixelShader(UI_BlitPS, UI_BlitPSOutput, UI_BlitPSInput input) { UI_DParams params = 
StructuredBufferFromRef(UI_ShaderConst_Params)[0]; Texture2D tex = Texture2DFromRef(params.target_ro); - SamplerState sampler = SamplerStateFromRef(GPU_BasicPointSampler); + SamplerState sampler = SamplerStateFromRef(G_BasicPointSampler); Vec2 uv = input.src_uv; Vec4 result = tex.Sample(sampler, uv); - DebugPrint("Hello there"); + G_DebugPrint("Hello there"); UI_BlitPSOutput output; output.SV_Target0 = result; diff --git a/src/window/window.h b/src/window/window.h index c4546d94..593cd856 100644 --- a/src/window/window.h +++ b/src/window/window.h @@ -38,7 +38,7 @@ Enum(WND_BackbufferSizeMode) Struct(WND_Frame) { WND_Handle window; - GPU_ResourceHandle backbuffer; + G_ResourceHandle backbuffer; ControllerEventsArray controller_events; @@ -67,5 +67,5 @@ void WND_PushCmd_(WND_Frame frame, WND_Cmd desc); //////////////////////////////////////////////////////////// //~ @hookdecl Frame -WND_Frame WND_BeginFrame(GPU_Format backbuffer_format, WND_BackbufferSizeMode backbuffer_size_mode); +WND_Frame WND_BeginFrame(G_Format backbuffer_format, WND_BackbufferSizeMode backbuffer_size_mode); void WND_EndFrame(WND_Frame frame, i32 vsync); diff --git a/src/window/window_win32/window_win32.c b/src/window/window_win32/window_win32.c index 20c7e69f..cd1b2696 100644 --- a/src/window/window_win32/window_win32.c +++ b/src/window/window_win32/window_win32.c @@ -145,7 +145,7 @@ void WND_W32_ProcessMessagesForever(WaveLaneCtx *lane) //- Acquire swapchain { - window->swapchain = GPU_AcquireSwapchain((u64)window->hwnd); + window->swapchain = G_AcquireSwapchain((u64)window->hwnd); } //- Begin processing messages @@ -403,7 +403,7 @@ void WND_PushCmd_(WND_Frame frame, WND_Cmd desc) //////////////////////////////////////////////////////////// //~ @hookimpl Frame -WND_Frame WND_BeginFrame(GPU_Format backbuffer_format, WND_BackbufferSizeMode backbuffer_size_mode) +WND_Frame WND_BeginFrame(G_Format backbuffer_format, WND_BackbufferSizeMode backbuffer_size_mode) { WND_W32_SharedState *g = 
&WND_W32_shared_state; WND_W32_Window *window = &g->window; @@ -449,7 +449,7 @@ WND_Frame WND_BeginFrame(GPU_Format backbuffer_format, WND_BackbufferSizeMode ba { backbuffer_size = result.monitor_size; } - result.backbuffer = GPU_PrepareBackbuffer(window->swapchain, backbuffer_format, backbuffer_size); + result.backbuffer = G_PrepareBackbuffer(window->swapchain, backbuffer_format, backbuffer_size); /* Reset per-frame data */ if (!window->frame_arena) @@ -663,7 +663,7 @@ void WND_EndFrame(WND_Frame frame, i32 vsync) } /* Commit backbuffer */ - GPU_CommitBackbuffer(frame.backbuffer, vsync); + G_CommitBackbuffer(frame.backbuffer, vsync); ++window->frame_gen; EndScratch(scratch); diff --git a/src/window/window_win32/window_win32.h b/src/window/window_win32/window_win32.h index 4b748419..5a4fd5f9 100644 --- a/src/window/window_win32/window_win32.h +++ b/src/window/window_win32/window_win32.h @@ -4,8 +4,8 @@ Struct(WND_W32_Window) { HWND hwnd; - GPU_SwapchainHandle swapchain; - GPU_ResourceHandle backbuffer; + G_SwapchainHandle swapchain; + G_ResourceHandle backbuffer; Atomic32 is_ready; /* Window proc state */