create & track gpu descriptor heaps on command lists

This commit is contained in:
jacob 2025-06-18 18:38:06 -05:00
parent b93973720f
commit 1e06520d99
3 changed files with 321 additions and 227 deletions

View File

@ -51,7 +51,7 @@ struct arena *arena_alloc(u64 reserve)
void arena_release(struct arena *arena) void arena_release(struct arena *arena)
{ {
ASAN_UNPOISON(arena->reserve_, arena->committed + ARENA_HEADER_SIZE); ASAN_UNPOISON(arena, arena->committed + ARENA_HEADER_SIZE);
__prof; __prof;
__proffree(arena); __proffree(arena);
gstat_sub(GSTAT_MEMORY_COMMITTED, arena->committed); gstat_sub(GSTAT_MEMORY_COMMITTED, arena->committed);

View File

@ -80,7 +80,7 @@
#define DX12_TEST 0 #define DX12_TEST 1

View File

@ -10,6 +10,7 @@
#include "work.h" #include "work.h"
#include "log.h" #include "log.h"
#include "resource.h" #include "resource.h"
#include "atomic.h"
#pragma warning(push, 0) #pragma warning(push, 0)
# define UNICODE # define UNICODE
@ -28,6 +29,7 @@
#pragma comment(lib, "d3dcompiler") #pragma comment(lib, "d3dcompiler")
#define SH_CPU 1 #define SH_CPU 1
#define VT lpVtbl
//#define DX12_WAIT_FRAME_LATENCY 1 //#define DX12_WAIT_FRAME_LATENCY 1
//#define DX12_SWAPCHAIN_FLAGS ((DX12_ALLOW_TEARING * DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) | (DX12_WAIT_FRAME_LATENCY * DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT)) //#define DX12_SWAPCHAIN_FLAGS ((DX12_ALLOW_TEARING * DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING) | (DX12_WAIT_FRAME_LATENCY * DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT))
@ -81,9 +83,41 @@ struct pipeline_error {
}; };
struct command_queue { struct command_queue {
ID3D12CommandQueue *cq;
struct arena *arena; struct arena *arena;
struct sys_mutex *mutex; struct sys_mutex *mutex;
ID3D12CommandQueue *cq;
struct command_list *first_submitted_command_list;
struct command_list *last_submitted_command_list;
struct atomic_u64 fence_target;
ID3D12Fence *fence;
};
/* A recordable command list plus its allocator. Lists cycle through
 * open -> record -> close/submit, then sit on the owning queue's submitted
 * list until the queue fence passes submitted_fence_target, at which point
 * command_list_open may recycle them. */
struct command_list {
struct command_queue *cq; /* owning queue */
struct ID3D12GraphicsCommandList *cl;
struct ID3D12CommandAllocator *ca;
struct command_descriptor_heap *first_command_descriptor_heap; /* heaps pushed while recording (singly linked) */
u64 submitted_fence_target; /* queue fence value at which the GPU is done with this list */
struct command_list *prev_submitted; /* links in the queue's submitted list */
struct command_list *next_submitted;
};
/* Shader-visible descriptor heap tracked per command list. After submission
 * it moves to the global submitted list and may be recycled once the
 * submitting queue's fence reaches submitted_fence_target. */
struct command_descriptor_heap {
D3D12_DESCRIPTOR_HEAP_TYPE type;
ID3D12DescriptorHeap *heap;
D3D12_CPU_DESCRIPTOR_HANDLE cpu_handle; /* heap start (CPU side) */
D3D12_GPU_DESCRIPTOR_HANDLE gpu_handle; /* heap start (GPU side) */
struct command_descriptor_heap *next_in_command_list; /* singly linked per command list */
u64 submitted_fence_target; /* fence value at which the GPU is done with this heap */
struct command_queue *submitted_cq; /* queue whose fence to poll */
struct command_descriptor_heap *prev_submitted; /* links in global submitted list */
struct command_descriptor_heap *next_submitted;
}; };
struct descriptor { struct descriptor {
@ -110,6 +144,7 @@ struct dx12_resource {
D3D12_GPU_VIRTUAL_ADDRESS gpu_address; /* NOTE: 0 for textures */ D3D12_GPU_VIRTUAL_ADDRESS gpu_address; /* NOTE: 0 for textures */
struct v2i32 texture_size;
struct dx12_resource *next_free; struct dx12_resource *next_free;
}; };
@ -128,20 +163,6 @@ struct cpu_descriptor_heap {
struct D3D12_CPU_DESCRIPTOR_HANDLE handle; struct D3D12_CPU_DESCRIPTOR_HANDLE handle;
}; };
/* Shader-visible descriptor heap carrying its own recycling fence (the
 * queue signals free_fence when the heap is released). */
struct gpu_descriptor_heap {
D3D12_DESCRIPTOR_HEAP_TYPE type;
ID3D12DescriptorHeap *heap;
D3D12_CPU_DESCRIPTOR_HANDLE cpu_handle;
D3D12_GPU_DESCRIPTOR_HANDLE gpu_handle;
/* If GetCompletedValue(free_fence) < free_fence_value, the heap is still in use by the GPU */
ID3D12Fence *free_fence;
u64 free_fence_value;
struct gpu_descriptor_heap *prev_free; /* links in global free list */
struct gpu_descriptor_heap *next_free;
};
enum handle_kind { enum handle_kind {
DX12_HANDLE_KIND_NONE, DX12_HANDLE_KIND_NONE,
DX12_HANDLE_KIND_RESOURCE, DX12_HANDLE_KIND_RESOURCE,
@ -171,10 +192,10 @@ GLOBAL struct {
u64 num_handle_entries_reserved; u64 num_handle_entries_reserved;
/* Descriptor heaps pool */ /* Descriptor heaps pool */
struct sys_mutex *gpu_descriptor_heaps_mutex; struct sys_mutex *command_descriptor_heaps_mutex;
struct arena *gpu_descriptor_heaps_arena; struct arena *command_descriptor_heaps_arena;
struct gpu_descriptor_heap *first_free_gpu_descriptor_heap; struct command_descriptor_heap *first_submitted_command_descriptor_heap;
struct gpu_descriptor_heap *last_free_gpu_descriptor_heap; struct command_descriptor_heap *last_submitted_command_descriptor_heap;
/* Resources pool */ /* Resources pool */
struct sys_mutex *resources_mutex; struct sys_mutex *resources_mutex;
@ -255,8 +276,8 @@ struct gpu_startup_receipt gpu_startup(struct work_startup_receipt *work_sr, str
G.handle_entries_arena = arena_alloc(GIGABYTE(64)); G.handle_entries_arena = arena_alloc(GIGABYTE(64));
/* Initialize gpu descriptor heaps pool */ /* Initialize gpu descriptor heaps pool */
G.gpu_descriptor_heaps_mutex = sys_mutex_alloc(); G.command_descriptor_heaps_mutex = sys_mutex_alloc();
G.gpu_descriptor_heaps_arena = arena_alloc(GIGABYTE(64)); G.command_descriptor_heaps_arena = arena_alloc(GIGABYTE(64));
/* Initialize resources pool */ /* Initialize resources pool */
G.resources_mutex = sys_mutex_alloc(); G.resources_mutex = sys_mutex_alloc();
@ -1202,7 +1223,12 @@ INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE
desc.Priority = priority; desc.Priority = priority;
HRESULT hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&cq->cq); HRESULT hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&cq->cq);
if (FAILED(hr)) { if (FAILED(hr)) {
dx12_init_error(LIT("Failed to create command queue")); sys_panic(LIT("Failed to create command queue"));
}
hr = ID3D12Device_CreateFence(G.device, 0, 0, &IID_ID3D12Fence, (void **)&cq->fence);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command queue fence"));
} }
return cq; return cq;
@ -1215,6 +1241,244 @@ INTERNAL void command_queue_release(struct command_queue *cq)
//ID3D12CommandQueue_Release(G.cq_copy_background->cq); //ID3D12CommandQueue_Release(G.cq_copy_background->cq);
} }
/* ========================== *
* Command list
* ========================== */
/* Open a command list for recording on the given queue.
 * Recycles the first submitted list whose fence target the GPU has already
 * completed (queue fence snapshotted once up front); otherwise pushes a
 * fresh struct from the queue's arena and creates a new allocator + list.
 * The returned list has been Reset and is ready to record. */
INTERNAL struct command_list *command_list_open(struct command_queue *cq)
{
/* Any submitted list at or below this completed value has finished on the GPU */
u64 queue_fence_value = ID3D12Fence_GetCompletedValue(cq->fence);
struct command_list *cl = NULL;
struct ID3D12GraphicsCommandList *old_cl = NULL;
struct ID3D12CommandAllocator *old_ca = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(cq->mutex);
/* Find first command list ready for reuse */
for (struct command_list *tmp = cq->first_submitted_command_list; tmp; tmp = tmp->next_submitted) {
if (queue_fence_value >= tmp->submitted_fence_target) {
cl = tmp;
break;
}
}
if (cl) {
/* Remove from submitted list; keep the D3D objects so they can be
 * reused after the Reset below. */
old_cl = cl->cl;
old_ca = cl->ca;
struct command_list *prev = cl->prev_submitted;
struct command_list *next = cl->next_submitted;
/* NOTE(review): this unlink relies on prev_submitted being maintained
 * by the insertion code in command_list_close — verify. */
if (prev) {
prev->next_submitted = next;
} else {
cq->first_submitted_command_list = next;
}
if (next) {
next->prev_submitted = prev;
} else {
cq->last_submitted_command_list = prev;
}
} else {
cl = arena_push_no_zero(cq->arena, struct command_list);
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(cl);
cl->cq = cq;
HRESULT hr = 0;
/* FIXME: Determine command list type from command queue */
if (old_cl) {
/* Recycled path: reuse the existing allocator + list objects */
cl->cl = old_cl;
cl->ca = old_ca;
} else {
hr = ID3D12Device_CreateCommandAllocator(G.device, D3D12_COMMAND_LIST_TYPE_DIRECT, &IID_ID3D12CommandAllocator, (void **)&cl->ca);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command allocator"));
}
hr = ID3D12Device_CreateCommandList(G.device, 0, D3D12_COMMAND_LIST_TYPE_DIRECT, cl->ca, NULL, &IID_ID3D12GraphicsCommandList, (void **)&cl->cl);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command list"));
}
/* New lists are created open; close so the Reset below is uniform for
 * both the new and recycled paths. */
hr = ID3D12GraphicsCommandList_Close(cl->cl);
if (FAILED(hr)) {
sys_panic(LIT("Failed to close command list during initialization"));
}
}
/* Reset allocator and list so recording starts from a clean state */
hr = ID3D12CommandAllocator_Reset(cl->ca);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command allocator"));
}
hr = ID3D12GraphicsCommandList_Reset(cl->cl, cl->ca, NULL);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command list"));
}
return cl;
}
/* TODO: Allow multiple command list submissions */
INTERNAL void command_list_close(struct command_list *cl)
{
struct command_queue *cq = cl->cq;
/* Close & execute */
HRESULT hr = ID3D12GraphicsCommandList_Close(cl->cl);
if (FAILED(hr)) {
sys_panic(LIT("Failed to close command list before execution"));
}
ID3D12CommandQueue_ExecuteCommandLists(cq->cq, 1, (ID3D12CommandList **)&cl->cl);
/* Queue fence signal */
u64 target_fence_value = atomic_u64_eval_add_u64(&cq->fence_target, 1) + 1;
ID3D12CommandQueue_Signal(cq->cq, cq->fence, target_fence_value);
/* Add descriptor heaps to submitted list */
{
struct sys_lock lock = sys_mutex_lock_e(G.command_descriptor_heaps_mutex);
for (struct command_descriptor_heap *cdh = cl->first_command_descriptor_heap; cdh; cdh = cdh->next_in_command_list) {
cdh->submitted_cq = cq;
cdh->submitted_fence_target = target_fence_value;
cdh->next_submitted = G.first_submitted_command_descriptor_heap;
if (G.last_submitted_command_descriptor_heap) {
G.last_submitted_command_descriptor_heap->next_submitted = cdh;
} else {
G.first_submitted_command_descriptor_heap = cdh;
}
G.last_submitted_command_descriptor_heap = cdh;
G.first_submitted_command_descriptor_heap = cdh;
}
sys_mutex_unlock(&lock);
}
/* Add command list to submitted list */
cl->submitted_fence_target = target_fence_value;
{
struct sys_lock lock = sys_mutex_lock_e(cq->mutex);
if (cq->last_submitted_command_list) {
cq->last_submitted_command_list->next_submitted = cl;
} else {
cq->first_submitted_command_list = cl;
}
cq->last_submitted_command_list = cl;
sys_mutex_unlock(&lock);
}
}
/* ========================== *
* Command descriptor heap (GPU / shader visible descriptor heap)
* ========================== */
/* Acquire a shader-visible descriptor heap for this command list and fill it
 * with a copy of dh_cpu's descriptors. Recycles a previously submitted heap
 * when its queue's fence shows the GPU is done with it; otherwise creates a
 * new D3D12 heap. The heap is linked onto cl so command_list_close can move
 * it to the global submitted list. */
INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struct command_list *cl, struct cpu_descriptor_heap *dh_cpu)
{
ASSERT(dh_cpu->type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV); /* Src heap must have expected type */
/* Allocate GPU heap */
struct command_descriptor_heap *cdh = NULL;
ID3D12DescriptorHeap *old_heap = NULL;
D3D12_CPU_DESCRIPTOR_HANDLE old_cpu_handle = ZI;
D3D12_GPU_DESCRIPTOR_HANDLE old_gpu_handle = ZI;
{
struct sys_lock lock = sys_mutex_lock_e(G.command_descriptor_heaps_mutex);
/* Find first heap ready for reuse */
for (struct command_descriptor_heap *tmp = G.first_submitted_command_descriptor_heap; tmp; tmp = tmp->next_submitted) {
/* TODO: Cache completed fence values */
u64 queue_fence_value = ID3D12Fence_GetCompletedValue(tmp->submitted_cq->fence);
if (queue_fence_value >= tmp->submitted_fence_target) {
cdh = tmp;
break;
}
}
if (cdh) {
/* Remove from submitted list; keep the D3D heap + handles for reuse */
old_heap = cdh->heap;
old_cpu_handle = cdh->cpu_handle;
old_gpu_handle = cdh->gpu_handle;
struct command_descriptor_heap *prev = cdh->prev_submitted;
struct command_descriptor_heap *next = cdh->next_submitted;
/* NOTE(review): this unlink relies on prev_submitted being maintained
 * by the insertion code in command_list_close — verify. */
if (prev) {
prev->next_submitted = next;
} else {
G.first_submitted_command_descriptor_heap = next;
}
if (next) {
next->prev_submitted = prev;
} else {
G.last_submitted_command_descriptor_heap = prev;
}
} else {
/* No heap ready for reuse; allocate a new one */
cdh = arena_push_no_zero(G.command_descriptor_heaps_arena, struct command_descriptor_heap);
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(cdh);
if (old_heap) {
cdh->heap = old_heap;
cdh->cpu_handle = old_cpu_handle;
cdh->gpu_handle = old_gpu_handle;
} else {
/* First use of this slot: create the shader-visible heap */
D3D12_DESCRIPTOR_HEAP_DESC desc = ZI;
desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
desc.NumDescriptors = DX12_NUM_CBV_SRV_UAV_DESCRIPTORS;
desc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
HRESULT hr = ID3D12Device_CreateDescriptorHeap(G.device, &desc, &IID_ID3D12DescriptorHeap, (void **)&cdh->heap);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create GPU descriptor heap"));
}
ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(cdh->heap, &cdh->cpu_handle);
ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(cdh->heap, &cdh->gpu_handle);
}
/* Copy CPU heap */
/* NOTE(review): copies num_descriptors_reserved entries into a heap sized
 * DX12_NUM_CBV_SRV_UAV_DESCRIPTORS — assumes reserved <= capacity; verify. */
{
struct sys_lock lock = sys_mutex_lock_s(dh_cpu->mutex);
ID3D12Device_CopyDescriptorsSimple(G.device, dh_cpu->num_descriptors_reserved, cdh->cpu_handle, dh_cpu->handle, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
sys_mutex_unlock(&lock);
}
/* Insert into command list (head of singly linked list) */
cdh->next_in_command_list = cl->first_command_descriptor_heap;
cl->first_command_descriptor_heap = cdh;
return cdh;
}
/* ========================== *
* Descriptor
* ========================== */
/* Allocate a descriptor slot from a CPU descriptor heap.
 * Reuses a previously freed descriptor if one exists, otherwise reserves the
 * next unused slot in the heap (panics when capacity is exhausted). */
INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
{
struct descriptor *d = NULL;
D3D12_CPU_DESCRIPTOR_HANDLE handle = ZI;
{
struct sys_lock lock = sys_mutex_lock_e(dh->mutex);
if (dh->first_free_descriptor) {
/* NOTE(review): the reused descriptor is never popped off
 * first_free_descriptor here, and MEMZERO_STRUCT below wipes whatever
 * free-list link it carried — looks like the pop is missing; verify
 * against the descriptor release path. */
d = dh->first_free_descriptor;
handle = d->handle;
} else {
if (dh->num_descriptors_reserved >= dh->num_descriptors_capacity) {
sys_panic(LIT("Max descriptors reached in heap"));
}
d = arena_push_no_zero(dh->arena, struct descriptor);
/* Slot address = heap base + slot index * descriptor stride */
handle.ptr = dh->handle.ptr + (dh->num_descriptors_reserved * dh->descriptor_size);
++dh->num_descriptors_reserved;
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(d);
d->heap = dh;
d->handle = handle;
return d;
}
/* ========================== * /* ========================== *
* CPU descriptor heap * CPU descriptor heap
* ========================== */ * ========================== */
@ -1262,140 +1526,6 @@ INTERNAL void cpu_descriptor_heap_release(struct cpu_descriptor_heap *dh)
} }
#endif #endif
/* ========================== *
* Descriptor
* ========================== */
INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh)
{
struct descriptor *d = NULL;
D3D12_CPU_DESCRIPTOR_HANDLE handle = ZI;
{
struct sys_lock lock = sys_mutex_lock_e(dh->mutex);
if (dh->first_free_descriptor) {
d = dh->first_free_descriptor;
handle = d->handle;
} else {
if (dh->num_descriptors_reserved >= dh->num_descriptors_capacity) {
sys_panic(LIT("Max descriptors reached in heap"));
}
d = arena_push_no_zero(dh->arena, struct descriptor);
handle.ptr = dh->handle.ptr + (dh->num_descriptors_reserved * dh->descriptor_size);
++dh->num_descriptors_reserved;
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(d);
d->heap = dh;
d->handle = handle;
return d;
}
/* ========================== *
* GPU (shader visible) descriptor heap
* ========================== */
/* Acquire a shader-visible (GPU) descriptor heap and populate it with a copy
 * of dh_cpu's descriptors. Reuses a free-list heap whose fence shows the GPU
 * has finished with it; otherwise creates a new heap + fence. Return the
 * heap via gpu_descriptor_heap_release() when the commands using it have
 * been submitted. */
INTERNAL struct gpu_descriptor_heap *gpu_descriptor_heap_alloc(struct cpu_descriptor_heap *dh_cpu)
{
	ASSERT(dh_cpu->type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV); /* Src heap must have expected type */
	/* Allocate GPU heap */
	struct gpu_descriptor_heap *dh_gpu = NULL;
	ID3D12DescriptorHeap *heap = NULL;
	D3D12_CPU_DESCRIPTOR_HANDLE cpu_handle = ZI;
	D3D12_GPU_DESCRIPTOR_HANDLE gpu_handle = ZI;
	ID3D12Fence *free_fence = NULL;
	u64 free_fence_value = 0;
	{
		struct sys_lock lock = sys_mutex_lock_e(G.gpu_descriptor_heaps_mutex);
		/* Find first free & ready heap for reuse */
		/* FIXME: Rather than storing fence per heap, store & increment fence per queue and check against it */
		for (struct gpu_descriptor_heap *tmp = G.first_free_gpu_descriptor_heap; tmp; tmp = tmp->next_free) {
			if (ID3D12Fence_GetCompletedValue(tmp->free_fence) >= tmp->free_fence_value) {
				dh_gpu = tmp;
				break;
			}
		}
		if (dh_gpu) {
			/* BUGFIX: previously this reassigned dh_gpu to the list head,
			 * discarding the node the readiness scan actually found and
			 * potentially handing out a heap still in use by the GPU when
			 * the ready heap was not the first node. Keep the scanned node. */
			heap = dh_gpu->heap;
			cpu_handle = dh_gpu->cpu_handle;
			gpu_handle = dh_gpu->gpu_handle;
			free_fence = dh_gpu->free_fence;
			free_fence_value = dh_gpu->free_fence_value;
			/* Remove from free list */
			struct gpu_descriptor_heap *prev = dh_gpu->prev_free;
			struct gpu_descriptor_heap *next = dh_gpu->next_free;
			if (prev) {
				prev->next_free = next;
			} else {
				G.first_free_gpu_descriptor_heap = next;
			}
			if (next) {
				next->prev_free = prev;
			} else {
				G.last_free_gpu_descriptor_heap = prev;
			}
		} else {
			/* No heap ready for reuse; allocate a new one */
			dh_gpu = arena_push_no_zero(G.gpu_descriptor_heaps_arena, struct gpu_descriptor_heap);
		}
		sys_mutex_unlock(&lock);
	}
	MEMZERO_STRUCT(dh_gpu);
	if (!heap) {
		/* First use of this slot: create the D3D12 heap and its fence */
		D3D12_DESCRIPTOR_HEAP_DESC desc = ZI;
		desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
		desc.NumDescriptors = DX12_NUM_CBV_SRV_UAV_DESCRIPTORS;
		desc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
		HRESULT hr = ID3D12Device_CreateDescriptorHeap(G.device, &desc, &IID_ID3D12DescriptorHeap, (void **)&heap);
		if (FAILED(hr)) {
			sys_panic(LIT("Failed to create GPU descriptor heap"));
		}
		ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(heap, &cpu_handle);
		ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(heap, &gpu_handle);
		hr = ID3D12Device_CreateFence(G.device, 0, 0, &IID_ID3D12Fence, (void **)&free_fence);
		if (FAILED(hr)) {
			sys_panic(LIT("Failed to create GPU descriptor heap fence"));
		}
	}
	dh_gpu->heap = heap;
	dh_gpu->cpu_handle = cpu_handle;
	dh_gpu->gpu_handle = gpu_handle;
	dh_gpu->free_fence = free_fence;
	dh_gpu->free_fence_value = free_fence_value;
	/* Copy CPU heap */
	{
		struct sys_lock lock = sys_mutex_lock_s(dh_cpu->mutex);
		ID3D12Device_CopyDescriptorsSimple(G.device, dh_cpu->num_descriptors_reserved, dh_gpu->cpu_handle, dh_cpu->handle, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
		sys_mutex_unlock(&lock);
	}
	return dh_gpu;
}
INTERNAL void gpu_descriptor_heap_release(struct command_queue *cq, struct gpu_descriptor_heap *dh)
{
/* Queue fence signal */
++dh->free_fence_value;
ID3D12CommandQueue_Signal(cq->cq, dh->free_fence, dh->free_fence_value);
/* Add to free list */
struct sys_lock lock = sys_mutex_lock_e(G.gpu_descriptor_heaps_mutex);
dh->next_free = G.first_free_gpu_descriptor_heap;
if (G.last_free_gpu_descriptor_heap) {
G.last_free_gpu_descriptor_heap->next_free = dh;
} else {
G.first_free_gpu_descriptor_heap = dh;
}
G.last_free_gpu_descriptor_heap = dh;
G.first_free_gpu_descriptor_heap = dh;
sys_mutex_unlock(&lock);
}
/* ========================== * /* ========================== *
* Buffer * Buffer
* ========================== */ * ========================== */
@ -1411,9 +1541,6 @@ struct dx12_buffer {
/* TODO: Move command list out of plan struct */ /* TODO: Move command list out of plan struct */
struct plan { struct plan {
struct arena *arena; struct arena *arena;
ID3D12CommandAllocator *ca_direct;
ID3D12GraphicsCommandList *cl_direct;
struct dx12_buffer *material_instances; struct dx12_buffer *material_instances;
struct plan *next_free; struct plan *next_free;
@ -1421,7 +1548,6 @@ struct plan {
INTERNAL struct plan *plan_alloc(void) INTERNAL struct plan *plan_alloc(void)
{ {
HRESULT hr = 0;
struct plan *plan = NULL; struct plan *plan = NULL;
{ {
struct arena *arena = arena_alloc(MEGABYTE(64)); struct arena *arena = arena_alloc(MEGABYTE(64));
@ -1429,21 +1555,6 @@ INTERNAL struct plan *plan_alloc(void)
plan->arena = arena; plan->arena = arena;
} }
hr = ID3D12Device_CreateCommandAllocator(G.device, D3D12_COMMAND_LIST_TYPE_DIRECT, &IID_ID3D12CommandAllocator, (void **)&plan->ca_direct);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command allocator"));
}
hr = ID3D12Device_CreateCommandList(G.device, 0, D3D12_COMMAND_LIST_TYPE_DIRECT, plan->ca_direct, NULL, &IID_ID3D12GraphicsCommandList, (void **)&plan->cl_direct);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command list"));
}
hr = ID3D12GraphicsCommandList_Close(plan->cl_direct);
if (FAILED(hr)) {
sys_panic(LIT("Failed to close command list during initialization"));
}
return plan; return plan;
} }
@ -1523,6 +1634,11 @@ INTERNAL struct dx12_resource *dx12_resource_alloc(D3D12_HEAP_PROPERTIES heap_pr
return r; return r;
} }
/* Release a dx12 resource. Currently a no-op stub — the underlying
 * ID3D12Resource and descriptors are not yet freed here; presumably a
 * deferred-release path is planned (TODO confirm). */
INTERNAL void dx12_resource_release(struct dx12_resource *t)
{
(UNUSED)t;
}
INTERNAL enum D3D12_RESOURCE_STATES dx12_resource_barrier(ID3D12GraphicsCommandList *cl, struct dx12_resource *resource, enum D3D12_RESOURCE_STATES state) INTERNAL enum D3D12_RESOURCE_STATES dx12_resource_barrier(ID3D12GraphicsCommandList *cl, struct dx12_resource *resource, enum D3D12_RESOURCE_STATES state)
{ {
enum D3D12_RESOURCE_STATES old_state = resource->state; enum D3D12_RESOURCE_STATES old_state = resource->state;
@ -1545,11 +1661,6 @@ INTERNAL enum D3D12_RESOURCE_STATES dx12_resource_barrier(ID3D12GraphicsCommandL
return old_state; return old_state;
} }
INTERNAL void dx12_resource_release(struct dx12_resource *t)
{
(UNUSED)t;
}
struct gpu_handle gpu_texture_alloc(enum gpu_texture_format format, u32 flags, struct v2i32 size, void *initial_data) struct gpu_handle gpu_texture_alloc(enum gpu_texture_format format, u32 flags, struct v2i32 size, void *initial_data)
{ {
LOCAL_PERSIST const DXGI_FORMAT formats[] = { LOCAL_PERSIST const DXGI_FORMAT formats[] = {
@ -1562,7 +1673,6 @@ struct gpu_handle gpu_texture_alloc(enum gpu_texture_format format, u32 flags, s
dxgi_format = formats[format]; dxgi_format = formats[format];
} }
if (format == 0) { if (format == 0) {
/* TODO: Don't panic */
sys_panic(LIT("Tried to create texture with unknown format")); sys_panic(LIT("Tried to create texture with unknown format"));
} }
@ -1592,6 +1702,7 @@ struct gpu_handle gpu_texture_alloc(enum gpu_texture_format format, u32 flags, s
D3D12_RESOURCE_STATES initial_state = D3D12_RESOURCE_STATE_COPY_DEST; D3D12_RESOURCE_STATES initial_state = D3D12_RESOURCE_STATE_COPY_DEST;
struct dx12_resource *r = dx12_resource_alloc(heap_props, heap_flags, desc, initial_state, view_flags); struct dx12_resource *r = dx12_resource_alloc(heap_props, heap_flags, desc, initial_state, view_flags);
r->texture_size = size;
(UNUSED)initial_data; (UNUSED)initial_data;
@ -1606,8 +1717,11 @@ void gpu_texture_clear(struct gpu_handle target_resource, u32 clear_color)
struct v2i32 gpu_texture_get_size(struct gpu_handle resource) struct v2i32 gpu_texture_get_size(struct gpu_handle resource)
{ {
(UNUSED)resource;
struct v2i32 res = ZI; struct v2i32 res = ZI;
struct dx12_resource *dx12_resource = handle_get_data(resource, DX12_HANDLE_KIND_RESOURCE);
if (dx12_resource) {
res = dx12_resource->texture_size;
}
return res; return res;
} }
@ -1621,23 +1735,16 @@ void gpu_dispatch(struct gpu_dispatch_params params)
struct plan *plan = handle_get_data(params.plan, DX12_HANDLE_KIND_PLAN); struct plan *plan = handle_get_data(params.plan, DX12_HANDLE_KIND_PLAN);
struct dx12_resource *target = handle_get_data(params.draw_target, DX12_HANDLE_KIND_RESOURCE); struct dx12_resource *target = handle_get_data(params.draw_target, DX12_HANDLE_KIND_RESOURCE);
HRESULT hr = 0;
struct command_queue *cq = G.cq_direct; struct command_queue *cq = G.cq_direct;
ID3D12CommandAllocator *ca = plan->ca_direct; struct command_list *cl = command_list_open(cq);
ID3D12GraphicsCommandList *cl = plan->cl_direct; /* Push buffers */
#if 0
/* FIXME: Use fence to ensure command allocator has finished execution on GPU before resetting */ {
/* TODO: Reuse temporary command lists in same way as gpu descriptor heaps */ dx12_buffer_submit(plan->material_instances);
hr = ID3D12CommandAllocator_Reset(ca);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command allocator"));
}
hr = ID3D12GraphicsCommandList_Reset(cl, ca, NULL);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command list"));
} }
#endif
struct command_descriptor_heap *descriptor_heap = command_list_push_descriptor_heap(cl, G.cbv_srv_uav_heap);
/* Viewport */ /* Viewport */
struct rect viewport = params.draw_target_viewport; struct rect viewport = params.draw_target_viewport;
@ -1656,16 +1763,8 @@ void gpu_dispatch(struct gpu_dispatch_params params)
d3d12_scissor.right = viewport.x + viewport.width; d3d12_scissor.right = viewport.x + viewport.width;
d3d12_scissor.bottom = viewport.y + viewport.height; d3d12_scissor.bottom = viewport.y + viewport.height;
/* Submit buffers */
#if 0
{
dx12_buffer_submit(plan->material_instances);
}
#endif
/* Create temporary descriptor heap */ /* Create temporary descriptor heap */
/* NOTE: This should always occur after buffers are submitted */ /* NOTE: This should always occur after buffers are submitted */
struct gpu_descriptor_heap *temp_descriptor_heap = gpu_descriptor_heap_alloc(G.cbv_srv_uav_heap);
/* Material pass */ /* Material pass */
//if (plan->material_instances->count > 0) { //if (plan->material_instances->count > 0) {
@ -1674,8 +1773,8 @@ void gpu_dispatch(struct gpu_dispatch_params params)
struct pipeline *pipeline = &G.test_pipeline; struct pipeline *pipeline = &G.test_pipeline;
/* Bind pipeline */ /* Bind pipeline */
ID3D12GraphicsCommandList_SetPipelineState(cl, pipeline->pso); ID3D12GraphicsCommandList_SetPipelineState(cl->cl, pipeline->pso);
ID3D12GraphicsCommandList_SetGraphicsRootSignature(cl, pipeline->rootsig); ID3D12GraphicsCommandList_SetGraphicsRootSignature(cl->cl, pipeline->rootsig);
/* Bind constant buffer */ /* Bind constant buffer */
#if 0 #if 0
@ -1686,37 +1785,32 @@ void gpu_dispatch(struct gpu_dispatch_params params)
#endif #endif
/* Bind descriptor heap */ /* Bind descriptor heap */
ID3D12DescriptorHeap *heaps[] = { temp_descriptor_heap->heap }; ID3D12DescriptorHeap *heaps[] = { descriptor_heap->heap };
ID3D12GraphicsCommandList_SetDescriptorHeaps(cl, ARRAY_COUNT(heaps), heaps); ID3D12GraphicsCommandList_SetDescriptorHeaps(cl->cl, ARRAY_COUNT(heaps), heaps);
ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl, 2, temp_descriptor_heap->gpu_handle); ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 2, descriptor_heap->gpu_handle);
/* Setup Rasterizer State */ /* Setup Rasterizer State */
ID3D12GraphicsCommandList_RSSetViewports(cl, 1, &d3d12_viewport); ID3D12GraphicsCommandList_RSSetViewports(cl->cl, 1, &d3d12_viewport);
ID3D12GraphicsCommandList_RSSetScissorRects(cl, 1, &d3d12_scissor); ID3D12GraphicsCommandList_RSSetScissorRects(cl->cl, 1, &d3d12_scissor);
/* Transition render target */ /* Transition render target */
enum D3D12_RESOURCE_STATES old_state = dx12_resource_barrier(cl, target, D3D12_RESOURCE_STATE_RENDER_TARGET); enum D3D12_RESOURCE_STATES old_state = dx12_resource_barrier(cl->cl, target, D3D12_RESOURCE_STATE_RENDER_TARGET);
ID3D12GraphicsCommandList_OMSetRenderTargets(cl, 1, &target->rtv_descriptor->handle, false, NULL); ID3D12GraphicsCommandList_OMSetRenderTargets(cl->cl, 1, &target->rtv_descriptor->handle, false, NULL);
//f32 clear_color[] = { 0.0f, 0.0f, 0.0f, 0.0f }; //f32 clear_color[] = { 0.0f, 0.0f, 0.0f, 0.0f };
//ID3D12GraphicsCommandList_ClearRenderTargetView(cl, rtvHandle, clearColor, 0, nullptr); //ID3D12GraphicsCommandList_ClearRenderTargetView(cl->cl, rtvHandle, clearColor, 0, nullptr);
/* Draw */ /* Draw */
ID3D12GraphicsCommandList_IASetPrimitiveTopology(cl, D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST); ID3D12GraphicsCommandList_IASetPrimitiveTopology(cl->cl, D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
ID3D12GraphicsCommandList_IASetVertexBuffers(cl, 0, 1, &G.dummy_vertex_buffer_view); ID3D12GraphicsCommandList_IASetVertexBuffers(cl->cl, 0, 1, &G.dummy_vertex_buffer_view);
ID3D12GraphicsCommandList_IASetIndexBuffer(cl, &G.quad_index_buffer_view); ID3D12GraphicsCommandList_IASetIndexBuffer(cl->cl, &G.quad_index_buffer_view);
ID3D12GraphicsCommandList_DrawIndexedInstanced(cl, 6, plan->material_instances->count, 0, 0, 0); ID3D12GraphicsCommandList_DrawIndexedInstanced(cl->cl, 6, plan->material_instances->count, 0, 0, 0);
/* Reset render target */ /* Reset render target */
dx12_resource_barrier(cl, target, old_state); dx12_resource_barrier(cl->cl, target, old_state);
} }
/* Execute command list */ /* Execute command list */
hr = ID3D12GraphicsCommandList_Close(cl); command_list_close(cl);
if (FAILED(hr)) {
sys_panic(LIT("Failed to close command list before execution"));
}
gpu_descriptor_heap_release(cq, temp_descriptor_heap);
#if 0 #if 0
__prof; __prof;