dx12 progress

This commit is contained in:
jacob 2025-06-17 21:10:33 -05:00
parent 13f5348265
commit 1f7afffe32
2 changed files with 116 additions and 102 deletions

View File

@ -1595,21 +1595,22 @@ void gpu_dispatch(struct gpu_dispatch_params params)
__profscope_dx11(G.profiling_ctx, Dispatch, RGB32_F(0.5, 0.2, 0.2));
struct dx11_plan *plan = (struct dx11_plan *)params.plan.v;
/* Swap cmd lists */
struct arena swp_arena = plan->gpu_cmds_arena;
plan->gpu_cmds_arena = plan->cpu_cmds_arena;
plan->gpu_first_cmd = plan->cpu_first_cmd;
plan->gpu_last_cmd = plan->cpu_last_cmd;
/* Reset cpu cmd list */
plan->cpu_cmds_arena = swp_arena;
plan->cpu_first_cmd = NULL;
plan->cpu_last_cmd = NULL;
arena_reset(&plan->cpu_cmds_arena);
/* Submit cmd data */
{
__profscope(Submit buffers);
/* Swap cmd plans */
struct arena swp_arena = plan->gpu_cmds_arena;
plan->gpu_cmds_arena = plan->cpu_cmds_arena;
plan->gpu_first_cmd = plan->cpu_first_cmd;
plan->gpu_last_cmd = plan->cpu_last_cmd;
/* Reset cpu cmds */
plan->cpu_cmds_arena = swp_arena;
plan->cpu_first_cmd = NULL;
plan->cpu_last_cmd = NULL;
arena_reset(&plan->cpu_cmds_arena);
/* Submit mesh buffers */
dx11_buffer_submit(plan->cmd_buffers.mesh.vertex_buffer);
dx11_buffer_submit(plan->cmd_buffers.mesh.index_buffer);

View File

@ -80,6 +80,12 @@ struct pipeline_error {
struct string msg;
};
/* A D3D12 command queue plus the state needed to share it across threads.
 * Allocated by command_queue_alloc(); the struct itself lives inside its
 * own `arena` (see command_queue_alloc), so the arena owns this memory. */
struct command_queue {
struct arena arena;       /* backing storage; the command_queue struct is pushed into it */
struct sys_mutex mutex;   /* guards submissions to `cq` from multiple threads */
ID3D12CommandQueue *cq;   /* the underlying D3D12 queue (COM object) */
};
struct descriptor {
struct cpu_descriptor_heap *heap;
D3D12_CPU_DESCRIPTOR_HANDLE handle;
@ -208,10 +214,10 @@ GLOBAL struct {
/* Command queues */
/* TODO: Add optional mode to route everything to direct queue */
ID3D12CommandQueue *cq_direct;
ID3D12CommandQueue *cq_compute;
ID3D12CommandQueue *cq_copy_critical;
ID3D12CommandQueue *cq_copy_background;
struct command_queue *cq_direct;
struct command_queue *cq_compute;
struct command_queue *cq_copy_critical;
struct command_queue *cq_copy_background;
/* Swapchain */
u32 swapchain_frame_index;
@ -232,11 +238,13 @@ GLOBAL struct {
* ========================== */
INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(gpu_shutdown);
INTERNAL struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRIPTOR_HEAP_TYPE type);
INTERNAL void dx12_init_device(void);
INTERNAL void dx12_init_objects(void);
INTERNAL void dx12_init_swapchain(struct sys_window *window);
INTERNAL void dx12_init_pipelines(void);
INTERNAL struct cpu_descriptor_heap *cpu_descriptor_heap_alloc(enum D3D12_DESCRIPTOR_HEAP_TYPE type);
INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE type, enum D3D12_COMMAND_QUEUE_PRIORITY priority);
INTERNAL void command_queue_release(struct command_queue *cq);
struct gpu_startup_receipt gpu_startup(struct work_startup_receipt *work_sr, struct sys_window *window)
{
@ -341,10 +349,10 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(gpu_shutdown)
ID3D12DescriptorHeap_Release(G.swapchain_rtv_heap);
IDXGISwapChain3_Release(G.swapchain);
ID3D12CommandAllocator_Release(G.swapchain_ca);
ID3D12CommandQueue_Release(G.cq_copy_background);
ID3D12CommandQueue_Release(G.cq_copy_critical);
ID3D12CommandQueue_Release(G.cq_compute);
ID3D12CommandQueue_Release(G.cq_direct);
command_queue_release(G.cq_copy_background);
command_queue_release(G.cq_copy_critical);
command_queue_release(G.cq_compute);
command_queue_release(G.cq_direct);
ID3D12Device_Release(G.device);
#endif
}
@ -575,8 +583,6 @@ INTERNAL void dx12_init_device(void)
INTERNAL void dx12_init_objects(void)
{
HRESULT hr = 0;
/* Initialize desc sizes */
G.desc_sizes[D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV] = ID3D12Device_GetDescriptorHandleIncrementSize(G.device, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
G.desc_sizes[D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER] = ID3D12Device_GetDescriptorHandleIncrementSize(G.device, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER);
@ -592,49 +598,10 @@ INTERNAL void dx12_init_objects(void)
G.rtv_heap = cpu_descriptor_heap_alloc(D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
/* Create direct command queue */
{
D3D12_COMMAND_QUEUE_DESC desc = ZI;
desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
desc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT;
hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&G.cq_direct);
if (FAILED(hr)) {
dx12_init_error(LIT("Failed to create direct command queue"));
}
}
/* Create compute command queue */
{
D3D12_COMMAND_QUEUE_DESC desc = ZI;
desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
desc.Type = D3D12_COMMAND_LIST_TYPE_COMPUTE;
hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&G.cq_compute);
if (FAILED(hr)) {
dx12_init_error(LIT("Failed to create compute command queue"));
}
}
/* Create critical copy command queue */
{
D3D12_COMMAND_QUEUE_DESC desc = ZI;
desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
desc.Type = D3D12_COMMAND_LIST_TYPE_COPY;
desc.Priority = D3D12_COMMAND_QUEUE_PRIORITY_HIGH;
hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&G.cq_copy_critical);
if (FAILED(hr)) {
dx12_init_error(LIT("Failed to create critical copy command queue"));
}
}
/* Create background copy command queue */
{
D3D12_COMMAND_QUEUE_DESC desc = ZI;
desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
desc.Type = D3D12_COMMAND_LIST_TYPE_COPY;
hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&G.cq_copy_background);
if (FAILED(hr)) {
dx12_init_error(LIT("Failed to create background copy command queue"));
}
}
G.cq_direct = command_queue_alloc(D3D12_COMMAND_LIST_TYPE_DIRECT, D3D12_COMMAND_QUEUE_PRIORITY_NORMAL);
G.cq_compute = command_queue_alloc(D3D12_COMMAND_LIST_TYPE_COMPUTE, D3D12_COMMAND_QUEUE_PRIORITY_NORMAL);
G.cq_copy_critical = command_queue_alloc(D3D12_COMMAND_LIST_TYPE_COPY, D3D12_COMMAND_QUEUE_PRIORITY_HIGH);
G.cq_copy_background = command_queue_alloc(D3D12_COMMAND_LIST_TYPE_COPY, D3D12_COMMAND_QUEUE_PRIORITY_NORMAL);
}
/* ========================== *
@ -1214,7 +1181,39 @@ INTERNAL void pipeline_release(struct pipeline *pipeline)
}
}
#if 1
/* ========================== *
* Command queue
* ========================== */
/* Allocate a wrapped D3D12 command queue of the given list type and priority.
 * The returned struct is pushed into its own freshly-allocated arena, so the
 * arena is the owner of the struct's memory. Calls dx12_init_error() on
 * queue-creation failure (matching the other init-path error handling). */
INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE type, enum D3D12_COMMAND_QUEUE_PRIORITY priority)
{
	struct command_queue *cq = NULL;
	{
		/* NOTE(review): 64 GB here is presumably reserved virtual address
		 * space, not committed memory — confirm arena_alloc semantics. */
		struct arena arena = arena_alloc(GIGABYTE(64));
		cq = arena_push(&arena, struct command_queue);
		cq->arena = arena;
	}
	cq->mutex = sys_mutex_alloc();
	D3D12_COMMAND_QUEUE_DESC desc = ZI;
	desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
	desc.Type = type;
	desc.Priority = priority;
	/* BUG FIX: the queue was previously created into &G.cq_copy_critical
	 * (a copy-paste remnant from dx12_init_objects), which clobbered that
	 * global with a raw ID3D12CommandQueue* — it is now a
	 * struct command_queue * — and left cq->cq NULL. Create into cq->cq. */
	HRESULT hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&cq->cq);
	if (FAILED(hr)) {
		dx12_init_error(LIT("Failed to create command queue"));
	}
	return cq;
}
INTERNAL void command_queue_release(struct command_queue *cq)
{
/* TODO */
(UNUSED)cq;
//ID3D12CommandQueue_Release(G.cq_copy_background->cq);
}
/* ========================== *
* CPU descriptor heap
@ -1346,6 +1345,7 @@ INTERNAL struct gpu_descriptor_heap *gpu_descriptor_heap_alloc(struct cpu_descri
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(dh_gpu);
if (!heap) {
D3D12_DESCRIPTOR_HEAP_DESC desc = ZI;
desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
@ -1378,11 +1378,11 @@ INTERNAL struct gpu_descriptor_heap *gpu_descriptor_heap_alloc(struct cpu_descri
return dh_gpu;
}
INTERNAL void gpu_descriptor_heap_release(struct gpu_descriptor_heap *dh, ID3D12CommandQueue *cq)
INTERNAL void gpu_descriptor_heap_release(struct command_queue *cq, struct gpu_descriptor_heap *dh)
{
/* Queue fence signal */
++dh->free_fence_value;
ID3D12CommandQueue_Signal(cq, dh->free_fence, dh->free_fence_value);
ID3D12CommandQueue_Signal(cq->cq, dh->free_fence, dh->free_fence_value);
/* Add to free list */
struct sys_lock lock = sys_mutex_lock_e(&G.gpu_descriptor_heaps_mutex);
dh->next_free = G.first_free_gpu_descriptor_heap;
@ -1396,6 +1396,14 @@ INTERNAL void gpu_descriptor_heap_release(struct gpu_descriptor_heap *dh, ID3D12
sys_mutex_unlock(&lock);
}
/* ========================== *
* Buffer
* ========================== */
/* GPU buffer wrapper for the DX12 backend (work in progress).
 * Currently only tracks the element count — used by gpu_dispatch to size
 * the instanced draw (DrawIndexedInstanced instance count). */
struct dx12_buffer {
u64 count;  /* number of elements currently in the buffer */
};
/* ========================== *
* Plan
* ========================== */
@ -1406,6 +1414,8 @@ struct plan {
ID3D12CommandAllocator *ca_direct;
ID3D12GraphicsCommandList *cl_direct;
struct dx12_buffer *material_instances;
struct plan *next_free;
};
@ -1607,8 +1617,27 @@ struct v2i32 gpu_texture_get_size(struct gpu_handle resource)
void gpu_dispatch(struct gpu_dispatch_params params)
{
__prof;
struct plan *plan = handle_get_data(params.plan, DX12_HANDLE_KIND_PLAN);
struct dx12_resource *target = handle_get_data(params.draw_target, DX12_HANDLE_KIND_RESOURCE);
HRESULT hr = 0;
struct command_queue *cq = G.cq_direct;
ID3D12CommandAllocator *ca = plan->ca_direct;
ID3D12GraphicsCommandList *cl = plan->cl_direct;
/* FIXME: Use fence to ensure command allocator has finished execution on GPU before resetting */
/* TODO: Reuse temporary command lists in same way as gpu descriptor heaps */
hr = ID3D12CommandAllocator_Reset(ca);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command allocator"));
}
hr = ID3D12GraphicsCommandList_Reset(cl, ca, NULL);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command list"));
}
/* Viewport */
struct rect viewport = params.draw_target_viewport;
@ -1627,32 +1656,19 @@ void gpu_dispatch(struct gpu_dispatch_params params)
d3d12_scissor.right = viewport.x + viewport.width;
d3d12_scissor.bottom = viewport.y + viewport.height;
struct plan *plan = handle_get_data(params.plan, DX12_HANDLE_KIND_PLAN);
struct dx12_resource *target = handle_get_data(params.draw_target, DX12_HANDLE_KIND_RESOURCE);
ID3D12CommandQueue *cq = G.cq_direct;
ID3D12CommandAllocator *ca = plan->ca_direct;
ID3D12GraphicsCommandList *cl = plan->cl_direct;
/* FIXME: Use fence to ensure command allocator has finished execution on GPU before resetting */
hr = ID3D12CommandAllocator_Reset(ca);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command allocator"));
/* Submit buffers */
#if 0
{
dx12_buffer_submit(plan->material_instances);
}
#endif
hr = ID3D12GraphicsCommandList_Reset(cl, ca, NULL);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command list"));
}
/* Create temporary srv heap */
/* Create temporary descriptor heap */
/* NOTE: This should always occur after buffers are submitted */
struct gpu_descriptor_heap *temp_descriptor_heap = gpu_descriptor_heap_alloc(G.cbv_srv_uav_heap);
/* Material pass */
{
u32 instance_count = 0;
(UNUSED)plan;
if (plan->material_instances->count > 0) {
//struct pipeline *pipeline = dx12_get_pipeline(pipeline_scope, LIT("material"));
struct pipeline *pipeline = &G.test_pipeline;
@ -1661,9 +1677,14 @@ void gpu_dispatch(struct gpu_dispatch_params params)
ID3D12GraphicsCommandList_SetGraphicsRootSignature(cl, pipeline->rootsig);
/* Bind constant buffer */
/* TODO */
#if 0
struct dx12_buffer *constant_buffer = plan->constant_buffer;
if (!constant_buffer) {
constant_buffer = dx12_buffer_alloc();
}
#endif
/* Bind srv heap */
/* Bind descriptor heap */
ID3D12DescriptorHeap *heaps[] = { temp_descriptor_heap->heap };
ID3D12GraphicsCommandList_SetDescriptorHeaps(cl, ARRAY_COUNT(heaps), heaps);
ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl, 2, temp_descriptor_heap->gpu_handle);
@ -1682,7 +1703,7 @@ void gpu_dispatch(struct gpu_dispatch_params params)
ID3D12GraphicsCommandList_IASetPrimitiveTopology(cl, D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
ID3D12GraphicsCommandList_IASetVertexBuffers(cl, 0, 1, &G.dummy_vertex_buffer_view);
ID3D12GraphicsCommandList_IASetIndexBuffer(cl, &G.quad_index_buffer_view);
ID3D12GraphicsCommandList_DrawIndexedInstanced(cl, 6, instance_count, 0, 0, 0);
ID3D12GraphicsCommandList_DrawIndexedInstanced(cl, 6, plan->material_instances->count, 0, 0, 0);
/* Reset render target */
dx12_resource_barrier(cl, target, old_state);
@ -1694,7 +1715,7 @@ void gpu_dispatch(struct gpu_dispatch_params params)
sys_panic(LIT("Failed to close command list before execution"));
}
gpu_descriptor_heap_release(temp_descriptor_heap, cq);
gpu_descriptor_heap_release(cq, temp_descriptor_heap);
#if 0
__prof;
@ -1779,14 +1800,6 @@ void gpu_dispatch(struct gpu_dispatch_params params)
sprite_scope_end(sprite_scope);
#endif
}
#else
/* No-op dispatch stub compiled when the DX12 path above is disabled (#if 0).
 * The UNUSED casts keep -Wunused warnings quiet for the stub build. */
void gpu_dispatch(struct gpu_handle gpu_dispatch_state, struct gpu_dispatch_params params)
{
(UNUSED)gpu_dispatch_state;
(UNUSED)params;
(UNUSED)handle_get_data; /* referenced so the helper isn't flagged as unused in this configuration */
}
#endif
/* ========================== *
* Present