dx12 command buffers

This commit is contained in:
jacob 2025-06-19 14:40:08 -05:00
parent 99431ebdfa
commit 40e7abcc4e
10 changed files with 483 additions and 291 deletions

View File

@ -489,6 +489,8 @@ struct sprite_tag {
#define STRING_FROM_STRUCT(ptr) (CPPCOMPAT_INITLIST_TYPE(struct string) { sizeof(*(ptr)), (u8 *)(ptr) }) #define STRING_FROM_STRUCT(ptr) (CPPCOMPAT_INITLIST_TYPE(struct string) { sizeof(*(ptr)), (u8 *)(ptr) })
#define STRING_FROM_ARENA(arena) (STRING((arena)->pos, arena_base(arena)))
/* String from static array */ /* String from static array */
#define STRING_FROM_ARRAY(a) \ #define STRING_FROM_ARRAY(a) \
( \ ( \

View File

@ -214,7 +214,7 @@ struct dx11_shader_desc {
#if RESOURCE_RELOADING #if RESOURCE_RELOADING
struct arena *includes_arena; struct arena *includes_arena;
struct sys_mutex *includes_mutex; struct sys_mutex *includes_mutex;
struct dict includes_dict; struct dict *includes_dict;
struct atomic_i32 is_dirty; struct atomic_i32 is_dirty;
#endif #endif
}; };
@ -700,7 +700,7 @@ INTERNAL void shader_add_include(struct dx11_shader_desc *desc, struct string in
{ {
__prof; __prof;
u64 hash = hash_fnv64(HASH_FNV64_BASIS, include_name_src); u64 hash = hash_fnv64(HASH_FNV64_BASIS, include_name_src);
struct dict *dict = &desc->includes_dict; struct dict *dict = desc->includes_dict;
struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex); struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex);
{ {
dict_set(desc->includes_arena, dict, hash, 1); dict_set(desc->includes_arena, dict, hash, 1);
@ -711,7 +711,7 @@ INTERNAL void shader_add_include(struct dx11_shader_desc *desc, struct string in
INTERNAL void shader_reset_includes(struct dx11_shader_desc *desc) INTERNAL void shader_reset_includes(struct dx11_shader_desc *desc)
{ {
__prof; __prof;
struct dict *dict = &desc->includes_dict; struct dict *dict = desc->includes_dict;
struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex); struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex);
{ {
dict_reset(dict); dict_reset(dict);
@ -730,7 +730,7 @@ INTERNAL b32 shader_set_dirty(struct string name)
atomic_i32_eval_exchange(&desc->is_dirty, 1); atomic_i32_eval_exchange(&desc->is_dirty, 1);
caused_dirty = true; caused_dirty = true;
} else { } else {
struct dict *includes_dict = &desc->includes_dict; struct dict *includes_dict = desc->includes_dict;
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex); struct sys_lock lock = sys_mutex_lock_e(desc->includes_mutex);
{ {

View File

@ -11,6 +11,8 @@
#include "log.h" #include "log.h"
#include "resource.h" #include "resource.h"
#include "atomic.h" #include "atomic.h"
#include "util.h"
#include "rand.h"
/* Include common shader types */ /* Include common shader types */
#define SH_CPU 1 #define SH_CPU 1
@ -45,6 +47,7 @@
/* Arbitrary limits */ /* Arbitrary limits */
#define DX12_NUM_CBV_SRV_UAV_DESCRIPTORS (1024 * 64) #define DX12_NUM_CBV_SRV_UAV_DESCRIPTORS (1024 * 64)
#define DX12_NUM_RTV_DESCRIPTORS (1024 * 1) #define DX12_NUM_RTV_DESCRIPTORS (1024 * 1)
#define DX12_COMMAND_BUFFER_MIN_SIZE (1024 * 64)
#if RTC #if RTC
# define DX12_DEBUG 1 # define DX12_DEBUG 1
@ -101,6 +104,7 @@ struct command_list {
struct ID3D12CommandAllocator *ca; struct ID3D12CommandAllocator *ca;
struct command_descriptor_heap *first_command_descriptor_heap; struct command_descriptor_heap *first_command_descriptor_heap;
struct command_buffer *first_command_buffer;
u64 submitted_fence_target; u64 submitted_fence_target;
struct command_list *prev_submitted; struct command_list *prev_submitted;
@ -121,6 +125,26 @@ struct command_descriptor_heap {
struct command_descriptor_heap *next_submitted; struct command_descriptor_heap *next_submitted;
}; };
/* A pooled upload buffer recorded into a command list. Buffers are bucketed
 * into size groups and recycled once the fence they were submitted under has
 * completed (see command_list_push_buffer / command_list_close). */
struct command_buffer {
    struct command_buffer_group *group;          /* Size-group (pool bucket) this buffer belongs to */
    struct dx12_resource *staging_resource;      /* CPU-visible upload resource -- assumption: creation still stubbed, confirm */
    struct dx12_resource *gpu_resource;          /* GPU-local destination resource */
    u64 count;                                   /* Element count for draw calls; read by gpu_dispatch -- TODO confirm who sets it */

    struct command_buffer *next_in_command_list; /* Singly-linked chain of buffers recorded into one command list */

    u64 submitted_fence_target;                  /* Fence value that must complete before this buffer may be reused */
    struct command_queue *submitted_cq;          /* Queue this buffer was submitted on (owns the fence above) */
    struct command_buffer *prev_submitted;       /* Doubly-linked in-flight list within the group */
    struct command_buffer *next_submitted;
};

/* One pool bucket: the in-flight (submitted) buffers of a single size class. */
struct command_buffer_group {
    struct command_buffer *first_submitted;
    struct command_buffer *last_submitted;
};
struct descriptor { struct descriptor {
struct cpu_descriptor_heap *heap; struct cpu_descriptor_heap *heap;
D3D12_CPU_DESCRIPTOR_HANDLE handle; D3D12_CPU_DESCRIPTOR_HANDLE handle;
@ -198,6 +222,11 @@ GLOBAL struct {
struct command_descriptor_heap *first_submitted_command_descriptor_heap; struct command_descriptor_heap *first_submitted_command_descriptor_heap;
struct command_descriptor_heap *last_submitted_command_descriptor_heap; struct command_descriptor_heap *last_submitted_command_descriptor_heap;
/* Command buffers pool */
struct sys_mutex *command_buffers_mutex;
struct arena *command_buffers_arena;
struct dict *command_buffers_dict;
/* Resources pool */ /* Resources pool */
struct sys_mutex *resources_mutex; struct sys_mutex *resources_mutex;
struct arena *resources_arena; struct arena *resources_arena;
@ -276,10 +305,15 @@ struct gpu_startup_receipt gpu_startup(struct work_startup_receipt *work_sr, str
G.handle_entries_mutex = sys_mutex_alloc(); G.handle_entries_mutex = sys_mutex_alloc();
G.handle_entries_arena = arena_alloc(GIGABYTE(64)); G.handle_entries_arena = arena_alloc(GIGABYTE(64));
/* Initialize gpu descriptor heaps pool */ /* Initialize command descriptor heaps pool */
G.command_descriptor_heaps_mutex = sys_mutex_alloc(); G.command_descriptor_heaps_mutex = sys_mutex_alloc();
G.command_descriptor_heaps_arena = arena_alloc(GIGABYTE(64)); G.command_descriptor_heaps_arena = arena_alloc(GIGABYTE(64));
/* Initialize command buffers pool */
G.command_buffers_mutex = sys_mutex_alloc();
G.command_buffers_arena = arena_alloc(GIGABYTE(64));
G.command_buffers_dict = dict_init(G.command_buffers_arena, 4096);
/* Initialize resources pool */ /* Initialize resources pool */
G.resources_mutex = sys_mutex_alloc(); G.resources_mutex = sys_mutex_alloc();
G.resources_arena = arena_alloc(GIGABYTE(64)); G.resources_arena = arena_alloc(GIGABYTE(64));
@ -1183,252 +1217,6 @@ INTERNAL void pipeline_release(struct pipeline *pipeline)
} }
} }
/* ========================== *
* Command queue
* ========================== */
INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE type, enum D3D12_COMMAND_QUEUE_PRIORITY priority)
{
struct command_queue *cq = NULL;
{
struct arena *arena = arena_alloc(GIGABYTE(64));
cq = arena_push(arena, struct command_queue);
cq->arena = arena;
}
cq->mutex = sys_mutex_alloc();
D3D12_COMMAND_QUEUE_DESC desc = ZI;
desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;
desc.Type = type;
desc.Priority = priority;
HRESULT hr = ID3D12Device_CreateCommandQueue(G.device, &desc, &IID_ID3D12CommandQueue, (void **)&cq->cq);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command queue"));
}
hr = ID3D12Device_CreateFence(G.device, 0, 0, &IID_ID3D12Fence, (void **)&cq->fence);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command queue fence"));
}
return cq;
}
INTERNAL void command_queue_release(struct command_queue *cq)
{
/* TODO */
(UNUSED)cq;
//ID3D12CommandQueue_Release(G.cq_copy_background->cq);
}
/* ========================== *
* Command list
* ========================== */
INTERNAL struct command_list *command_list_open(struct command_queue *cq)
{
u64 queue_fence_value = ID3D12Fence_GetCompletedValue(cq->fence);
struct command_list *cl = NULL;
struct ID3D12GraphicsCommandList *old_cl = NULL;
struct ID3D12CommandAllocator *old_ca = NULL;
{
struct sys_lock lock = sys_mutex_lock_e(cq->mutex);
/* Find first command list ready for reuse */
for (struct command_list *tmp = cq->first_submitted_command_list; tmp; tmp = tmp->next_submitted) {
if (queue_fence_value >= tmp->submitted_fence_target) {
cl = tmp;
break;
}
}
if (cl) {
/* Remove from submitted list */
old_cl = cl->cl;
old_ca = cl->ca;
struct command_list *prev = cl->prev_submitted;
struct command_list *next = cl->next_submitted;
if (prev) {
prev->next_submitted = next;
} else {
cq->first_submitted_command_list = next;
}
if (next) {
next->prev_submitted = prev;
} else {
cq->last_submitted_command_list = prev;
}
} else {
cl = arena_push_no_zero(cq->arena, struct command_list);
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(cl);
cl->cq = cq;
HRESULT hr = 0;
/* FIXME: Determine command list type from command queue */
if (old_cl) {
cl->cl = old_cl;
cl->ca = old_ca;
} else {
hr = ID3D12Device_CreateCommandAllocator(G.device, D3D12_COMMAND_LIST_TYPE_DIRECT, &IID_ID3D12CommandAllocator, (void **)&cl->ca);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command allocator"));
}
hr = ID3D12Device_CreateCommandList(G.device, 0, D3D12_COMMAND_LIST_TYPE_DIRECT, cl->ca, NULL, &IID_ID3D12GraphicsCommandList, (void **)&cl->cl);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create command list"));
}
hr = ID3D12GraphicsCommandList_Close(cl->cl);
if (FAILED(hr)) {
sys_panic(LIT("Failed to close command list during initialization"));
}
}
/* Close */
hr = ID3D12CommandAllocator_Reset(cl->ca);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command allocator"));
}
hr = ID3D12GraphicsCommandList_Reset(cl->cl, cl->ca, NULL);
if (FAILED(hr)) {
sys_panic(LIT("Failed to reset command list"));
}
return cl;
}
/* TODO: Allow multiple command list submissions */
INTERNAL void command_list_close(struct command_list *cl)
{
struct command_queue *cq = cl->cq;
/* Close & execute */
HRESULT hr = ID3D12GraphicsCommandList_Close(cl->cl);
if (FAILED(hr)) {
sys_panic(LIT("Failed to close command list before execution"));
}
ID3D12CommandQueue_ExecuteCommandLists(cq->cq, 1, (ID3D12CommandList **)&cl->cl);
/* Queue fence signal */
u64 target_fence_value = atomic_u64_eval_add_u64(&cq->fence_target, 1) + 1;
ID3D12CommandQueue_Signal(cq->cq, cq->fence, target_fence_value);
/* Add descriptor heaps to submitted list */
{
struct sys_lock lock = sys_mutex_lock_e(G.command_descriptor_heaps_mutex);
for (struct command_descriptor_heap *cdh = cl->first_command_descriptor_heap; cdh; cdh = cdh->next_in_command_list) {
cdh->submitted_cq = cq;
cdh->submitted_fence_target = target_fence_value;
cdh->next_submitted = G.first_submitted_command_descriptor_heap;
if (G.last_submitted_command_descriptor_heap) {
G.last_submitted_command_descriptor_heap->next_submitted = cdh;
} else {
G.first_submitted_command_descriptor_heap = cdh;
}
G.last_submitted_command_descriptor_heap = cdh;
G.first_submitted_command_descriptor_heap = cdh;
}
sys_mutex_unlock(&lock);
}
/* Add command list to submitted list */
cl->submitted_fence_target = target_fence_value;
{
struct sys_lock lock = sys_mutex_lock_e(cq->mutex);
if (cq->last_submitted_command_list) {
cq->last_submitted_command_list->next_submitted = cl;
} else {
cq->first_submitted_command_list = cl;
}
cq->last_submitted_command_list = cl;
sys_mutex_unlock(&lock);
}
}
/* ========================== *
* Command descriptor heap (GPU / shader visible descriptor heap)
* ========================== */
INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struct command_list *cl, struct cpu_descriptor_heap *dh_cpu)
{
ASSERT(dh_cpu->type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV); /* Src heap must have expected type */
/* Allocate GPU heap */
struct command_descriptor_heap *cdh = NULL;
ID3D12DescriptorHeap *old_heap = NULL;
D3D12_CPU_DESCRIPTOR_HANDLE old_cpu_handle = ZI;
D3D12_GPU_DESCRIPTOR_HANDLE old_gpu_handle = ZI;
{
struct sys_lock lock = sys_mutex_lock_e(G.command_descriptor_heaps_mutex);
/* Find first heap ready for reuse */
for (struct command_descriptor_heap *tmp = G.first_submitted_command_descriptor_heap; tmp; tmp = tmp->next_submitted) {
/* TODO: Cache completed fence values */
u64 queue_fence_value = ID3D12Fence_GetCompletedValue(tmp->submitted_cq->fence);
if (queue_fence_value >= tmp->submitted_fence_target) {
cdh = tmp;
break;
}
}
if (cdh) {
/* Remove from submitted list */
old_heap = cdh->heap;
old_cpu_handle = cdh->cpu_handle;
old_gpu_handle = cdh->gpu_handle;
struct command_descriptor_heap *prev = cdh->prev_submitted;
struct command_descriptor_heap *next = cdh->next_submitted;
if (prev) {
prev->next_submitted = next;
} else {
G.first_submitted_command_descriptor_heap = next;
}
if (next) {
next->prev_submitted = prev;
} else {
G.last_submitted_command_descriptor_heap = prev;
}
} else {
/* No available heap available for reuse, allocate new */
cdh = arena_push_no_zero(G.command_descriptor_heaps_arena, struct command_descriptor_heap);
}
sys_mutex_unlock(&lock);
}
MEMZERO_STRUCT(cdh);
if (old_heap) {
cdh->heap = old_heap;
cdh->cpu_handle = old_cpu_handle;
cdh->gpu_handle = old_gpu_handle;
} else {
D3D12_DESCRIPTOR_HEAP_DESC desc = ZI;
desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
desc.NumDescriptors = DX12_NUM_CBV_SRV_UAV_DESCRIPTORS;
desc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
HRESULT hr = ID3D12Device_CreateDescriptorHeap(G.device, &desc, &IID_ID3D12DescriptorHeap, (void **)&cdh->heap);
if (FAILED(hr)) {
sys_panic(LIT("Failed to create GPU descriptor heap"));
}
ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(cdh->heap, &cdh->cpu_handle);
ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(cdh->heap, &cdh->gpu_handle);
}
/* Copy CPU heap */
{
struct sys_lock lock = sys_mutex_lock_s(dh_cpu->mutex);
ID3D12Device_CopyDescriptorsSimple(G.device, dh_cpu->num_descriptors_reserved, cdh->cpu_handle, dh_cpu->handle, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
sys_mutex_unlock(&lock);
}
/* Insert into command list */
cdh->next_in_command_list = cl->first_command_descriptor_heap;
cl->first_command_descriptor_heap = cdh;
return cdh;
}
/* ========================== * /* ========================== *
* Descriptor * Descriptor
@ -1507,14 +1295,6 @@ INTERNAL void cpu_descriptor_heap_release(struct cpu_descriptor_heap *dh)
} }
#endif #endif
/* ========================== *
* Buffer
* ========================== */
struct dx12_buffer {
u64 count;
};
/* ========================== * /* ========================== *
* Plan * Plan
* ========================== */ * ========================== */
@ -1522,7 +1302,8 @@ struct dx12_buffer {
/* TODO: Move command list out of plan struct */ /* TODO: Move command list out of plan struct */
struct plan { struct plan {
struct arena *arena; struct arena *arena;
struct dx12_buffer *material_instances;
struct arena *material_instances_arena;
struct plan *next_free; struct plan *next_free;
}; };
@ -1536,6 +1317,8 @@ INTERNAL struct plan *plan_alloc(void)
plan->arena = arena; plan->arena = arena;
} }
plan->material_instances_arena = arena_alloc(GIGABYTE(1));
return plan; return plan;
} }
@ -1731,6 +1514,412 @@ struct v2i32 gpu_texture_get_size(struct gpu_handle resource)
return res; return res;
} }
/* ========================== *
* Command queue
* ========================== */
/* Create a D3D12 command queue of the given type and priority, together with
 * its fence and a backing arena that the queue struct itself lives in.
 * Panics if the device refuses to create either object. */
INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE type, enum D3D12_COMMAND_QUEUE_PRIORITY priority)
{
    /* The queue owns its arena; allocate the struct from inside it. */
    struct arena *backing = arena_alloc(GIGABYTE(64));
    struct command_queue *cq = arena_push(backing, struct command_queue);
    cq->arena = backing;
    cq->mutex = sys_mutex_alloc();

    D3D12_COMMAND_QUEUE_DESC queue_desc = ZI;
    queue_desc.Type = type;
    queue_desc.Priority = priority;
    queue_desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE;

    HRESULT result = ID3D12Device_CreateCommandQueue(G.device, &queue_desc, &IID_ID3D12CommandQueue, (void **)&cq->cq);
    if (FAILED(result)) {
        sys_panic(LIT("Failed to create command queue"));
    }

    /* Fence starts at 0; fence_target counts up from there per submission. */
    result = ID3D12Device_CreateFence(G.device, 0, 0, &IID_ID3D12Fence, (void **)&cq->fence);
    if (FAILED(result)) {
        sys_panic(LIT("Failed to create command queue fence"));
    }

    return cq;
}
/* Release a command queue. Currently a stub: the ID3D12CommandQueue, fence,
 * mutex and arena are intentionally leaked until teardown is implemented. */
INTERNAL void command_queue_release(struct command_queue *cq)
{
    /* TODO */
    (UNUSED)cq;
    //ID3D12CommandQueue_Release(G.cq_copy_background->cq);
}
/* ========================== *
* Command list
* ========================== */
/* Open (begin recording) a command list on queue `cq`. Reuses the first
 * previously-submitted list whose fence target has already completed;
 * otherwise allocates a fresh allocator + list pair from the queue's arena.
 * Returns a list that has been Reset and is ready to record. */
INTERNAL struct command_list *command_list_open(struct command_queue *cq)
{
    /* Sample the fence once; anything submitted at or below this value is done. */
    u64 queue_fence_value = ID3D12Fence_GetCompletedValue(cq->fence);

    struct command_list *cl = NULL;
    struct ID3D12GraphicsCommandList *old_cl = NULL;
    struct ID3D12CommandAllocator *old_ca = NULL;
    {
        struct sys_lock lock = sys_mutex_lock_e(cq->mutex);

        /* Find first command list ready for reuse */
        for (struct command_list *tmp = cq->first_submitted_command_list; tmp; tmp = tmp->next_submitted) {
            if (queue_fence_value >= tmp->submitted_fence_target) {
                cl = tmp;
                break;
            }
        }
        if (cl) {
            /* Remove from submitted list; stash the D3D12 objects so they
             * survive the MEMZERO below and can be reset for reuse. */
            old_cl = cl->cl;
            old_ca = cl->ca;
            struct command_list *prev = cl->prev_submitted;
            struct command_list *next = cl->next_submitted;
            if (prev) {
                prev->next_submitted = next;
            } else {
                cq->first_submitted_command_list = next;
            }
            if (next) {
                next->prev_submitted = prev;
            } else {
                cq->last_submitted_command_list = prev;
            }
        } else {
            cl = arena_push_no_zero(cq->arena, struct command_list);
        }
        sys_mutex_unlock(&lock);
    }
    MEMZERO_STRUCT(cl);
    cl->cq = cq;

    HRESULT hr = 0;
    /* FIXME: Determine command list type from command queue */
    if (old_cl) {
        cl->cl = old_cl;
        cl->ca = old_ca;
    } else {
        hr = ID3D12Device_CreateCommandAllocator(G.device, D3D12_COMMAND_LIST_TYPE_DIRECT, &IID_ID3D12CommandAllocator, (void **)&cl->ca);
        if (FAILED(hr)) {
            sys_panic(LIT("Failed to create command allocator"));
        }
        hr = ID3D12Device_CreateCommandList(G.device, 0, D3D12_COMMAND_LIST_TYPE_DIRECT, cl->ca, NULL, &IID_ID3D12GraphicsCommandList, (void **)&cl->cl);
        if (FAILED(hr)) {
            sys_panic(LIT("Failed to create command list"));
        }
        /* New lists are created in the recording state; close immediately so
         * the common Reset path below works for both new and reused lists. */
        hr = ID3D12GraphicsCommandList_Close(cl->cl);
        if (FAILED(hr)) {
            sys_panic(LIT("Failed to close command list during initialization"));
        }
    }

    /* Reset allocator + list to begin recording */
    hr = ID3D12CommandAllocator_Reset(cl->ca);
    if (FAILED(hr)) {
        sys_panic(LIT("Failed to reset command allocator"));
    }
    hr = ID3D12GraphicsCommandList_Reset(cl->cl, cl->ca, NULL);
    if (FAILED(hr)) {
        sys_panic(LIT("Failed to reset command list"));
    }
    return cl;
}
/* TODO: Allow multiple command list submissions */
INTERNAL void command_list_close(struct command_list *cl)
{
struct command_queue *cq = cl->cq;
/* Close & execute */
HRESULT hr = ID3D12GraphicsCommandList_Close(cl->cl);
if (FAILED(hr)) {
sys_panic(LIT("Failed to close command list before execution"));
}
ID3D12CommandQueue_ExecuteCommandLists(cq->cq, 1, (ID3D12CommandList **)&cl->cl);
/* Queue fence signal */
/* FIXME: Wrap execute & signal in mutex */
u64 target_fence_value = atomic_u64_eval_add_u64(&cq->fence_target, 1) + 1;
ID3D12CommandQueue_Signal(cq->cq, cq->fence, target_fence_value);
/* Add descriptor heaps to submitted list */
{
struct sys_lock lock = sys_mutex_lock_e(G.command_descriptor_heaps_mutex);
for (struct command_descriptor_heap *cdh = cl->first_command_descriptor_heap; cdh; cdh = cdh->next_in_command_list) {
cdh->submitted_cq = cq;
cdh->submitted_fence_target = target_fence_value;
if (G.last_submitted_command_descriptor_heap) {
G.last_submitted_command_descriptor_heap->next_submitted = cdh;
} else {
G.first_submitted_command_descriptor_heap = cdh;
}
G.last_submitted_command_descriptor_heap = cdh;
}
sys_mutex_unlock(&lock);
}
/* Add command buffers to submitted list */
{
struct sys_lock lock = sys_mutex_lock_e(G.command_buffers_mutex);
for (struct command_buffer *cb = cl->first_command_buffer; cb; cb = cb->next_in_command_list) {
struct command_buffer_group *group = cb->group;
cb->submitted_cq = cq;
cb->submitted_fence_target = target_fence_value;
if (group->last_submitted) {
group->last_submitted->next_submitted = cb;
} else {
group->first_submitted = cb;
}
group->last_submitted = cb;
}
sys_mutex_unlock(&lock);
}
/* Add command list to submitted list */
cl->submitted_fence_target = target_fence_value;
{
struct sys_lock lock = sys_mutex_lock_e(cq->mutex);
if (cq->last_submitted_command_list) {
cq->last_submitted_command_list->next_submitted = cl;
} else {
cq->first_submitted_command_list = cl;
}
cq->last_submitted_command_list = cl;
sys_mutex_unlock(&lock);
}
}
/* ========================== *
* Command descriptor heap (GPU / shader visible descriptor heap)
* ========================== */
/* Attach a shader-visible (GPU) descriptor heap to command list `cl`, filled
 * by copying the reserved descriptors out of the CPU-side heap `dh_cpu`.
 * Reuses a previously-submitted heap whose fence has completed when possible;
 * otherwise creates a new full-size shader-visible heap. */
INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struct command_list *cl, struct cpu_descriptor_heap *dh_cpu)
{
    ASSERT(dh_cpu->type == D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV); /* Src heap must have expected type */

    /* Allocate GPU heap */
    struct command_descriptor_heap *cdh = NULL;
    ID3D12DescriptorHeap *old_heap = NULL;
    D3D12_CPU_DESCRIPTOR_HANDLE old_cpu_handle = ZI;
    D3D12_GPU_DESCRIPTOR_HANDLE old_gpu_handle = ZI;
    {
        struct sys_lock lock = sys_mutex_lock_e(G.command_descriptor_heaps_mutex);

        /* Find first heap ready for reuse */
        for (struct command_descriptor_heap *tmp = G.first_submitted_command_descriptor_heap; tmp; tmp = tmp->next_submitted) {
            /* TODO: Cache completed fence values */
            u64 queue_fence_value = ID3D12Fence_GetCompletedValue(tmp->submitted_cq->fence);
            if (queue_fence_value >= tmp->submitted_fence_target) {
                cdh = tmp;
                break;
            }
        }
        if (cdh) {
            /* Remove from submitted list; stash the D3D12 heap + handles so
             * they survive the MEMZERO below and can be reused. */
            old_heap = cdh->heap;
            old_cpu_handle = cdh->cpu_handle;
            old_gpu_handle = cdh->gpu_handle;
            struct command_descriptor_heap *prev = cdh->prev_submitted;
            struct command_descriptor_heap *next = cdh->next_submitted;
            if (prev) {
                prev->next_submitted = next;
            } else {
                G.first_submitted_command_descriptor_heap = next;
            }
            if (next) {
                next->prev_submitted = prev;
            } else {
                G.last_submitted_command_descriptor_heap = prev;
            }
        } else {
            /* No available heap available for reuse, allocate new */
            cdh = arena_push_no_zero(G.command_descriptor_heaps_arena, struct command_descriptor_heap);
        }
        sys_mutex_unlock(&lock);
    }
    MEMZERO_STRUCT(cdh);
    if (old_heap) {
        cdh->heap = old_heap;
        cdh->cpu_handle = old_cpu_handle;
        cdh->gpu_handle = old_gpu_handle;
    } else {
        /* Every pooled heap is created at the full fixed capacity so any heap
         * can be reused for any command list. */
        D3D12_DESCRIPTOR_HEAP_DESC desc = ZI;
        desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
        desc.NumDescriptors = DX12_NUM_CBV_SRV_UAV_DESCRIPTORS;
        desc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE;
        HRESULT hr = ID3D12Device_CreateDescriptorHeap(G.device, &desc, &IID_ID3D12DescriptorHeap, (void **)&cdh->heap);
        if (FAILED(hr)) {
            sys_panic(LIT("Failed to create GPU descriptor heap"));
        }
        ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(cdh->heap, &cdh->cpu_handle);
        ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(cdh->heap, &cdh->gpu_handle);
    }

    /* Copy CPU heap: only the reserved prefix of descriptors is copied. */
    {
        struct sys_lock lock = sys_mutex_lock_s(dh_cpu->mutex);
        ID3D12Device_CopyDescriptorsSimple(G.device, dh_cpu->num_descriptors_reserved, cdh->cpu_handle, dh_cpu->handle, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV);
        sys_mutex_unlock(&lock);
    }

    /* Insert into command list (head of its singly-linked chain) */
    cdh->next_in_command_list = cl->first_command_descriptor_heap;
    cl->first_command_descriptor_heap = cdh;
    return cdh;
}
/* ========================== *
* Command buffer
* ========================== */
/* Hash a buffer size into a dict key for the command-buffer size-group pool. */
INTERNAL u64 command_buffer_hash_from_size(u64 size)
{
    return rand_u64_from_seed(size);
}
/* Round `v` up to the nearest power of two. Returns 0 for v == 0, and values
 * above 2^63 wrap to 0 (the increment overflows). */
INTERNAL u64 align_up_pow2(u64 v)
{
    u64 res = 0;
    if (v > 0) {
        /* Smear the highest set bit of (v - 1) into every lower position,
         * then increment to land on the next power of two. */
        res = v - 1;
        for (u64 shift = 1; shift < 64; shift <<= 1) {
            res |= res >> shift;
        }
        ++res;
    }
    return res;
}
/* Allocate (or recycle) a command buffer big enough to hold `data`, record it
 * into command list `cl`, and return it. Buffers are pooled in power-of-two
 * size groups keyed through a dict; a buffer becomes reusable once the fence
 * it was submitted under has completed.
 * NOTE(review): resource creation and the actual upload of `data` are still
 * stubbed out (see the #if 0 block and FIXME below) — the returned buffer
 * currently carries no GPU data. */
INTERNAL struct command_buffer *command_list_push_buffer(struct command_list *cl, struct string data)
{
    /* Determine size: clamp to the pool minimum and round up to a power of
     * two so buffers fall into a small number of size groups. */
    u64 size = max_u64(DX12_COMMAND_BUFFER_MIN_SIZE, align_up_pow2(data.len));

    /* Allocate buffer */
    struct command_buffer_group *cb_group = NULL;
    struct command_buffer *cb = NULL;
    struct dx12_resource *old_staging_resource = NULL;
    struct dx12_resource *old_gpu_resource = NULL;
    {
        struct sys_lock lock = sys_mutex_lock_e(G.command_buffers_mutex);

        /* Look up (or lazily create) the size group for this bucket. */
        {
            u64 group_hash = command_buffer_hash_from_size(size);
            struct dict_entry *cb_group_entry = dict_ensure_entry(G.command_buffers_arena, G.command_buffers_dict, group_hash);
            cb_group = cb_group_entry->value;
            if (!cb_group) {
                /* Create group */
                cb_group = arena_push(G.command_buffers_arena, struct command_buffer_group);
                cb_group_entry->value = (u64)cb_group;
            }
        }

        /* Find first command buffer ready for reuse */
        for (struct command_buffer *tmp = cb_group->first_submitted; tmp; tmp = tmp->next_submitted) {
            /* TODO: Cache completed fence values */
            u64 queue_fence_value = ID3D12Fence_GetCompletedValue(tmp->submitted_cq->fence);
            if (queue_fence_value >= tmp->submitted_fence_target) {
                cb = tmp;
                break;
            }
        }
        if (cb) {
            /* Remove from submitted list; stash the resources so they survive
             * the MEMZERO below and can be reused. */
            old_staging_resource = cb->staging_resource;
            old_gpu_resource = cb->gpu_resource;
            struct command_buffer *prev = cb->prev_submitted;
            struct command_buffer *next = cb->next_submitted;
            if (prev) {
                prev->next_submitted = next;
            } else {
                cb_group->first_submitted = next;
            }
            if (next) {
                next->prev_submitted = prev;
            } else {
                cb_group->last_submitted = prev;
            }
        } else {
            /* Allocate new */
            cb = arena_push_no_zero(G.command_buffers_arena, struct command_buffer);
        }
        sys_mutex_unlock(&lock);
    }
    MEMZERO_STRUCT(cb);
    cb->group = cb_group;
    if (old_staging_resource) {
        cb->staging_resource = old_staging_resource;
        cb->gpu_resource = old_gpu_resource;
    } else {
        /* Create staging resource */
        {
            /* Disabled draft copied from the texture path — references
             * undeclared `format` and treats `size` as a v2; needs a rewrite
             * for buffer resources before it can be enabled. */
#if 0
            LOCAL_PERSIST const DXGI_FORMAT formats[] = {
                [GPU_TEXTURE_FORMAT_R8G8B8A8_UNORM] = DXGI_FORMAT_R8G8B8A8_UNORM,
                [GPU_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB] = DXGI_FORMAT_R8G8B8A8_UNORM_SRGB
            };
            DXGI_FORMAT dxgi_format = 0;
            if (format < (i32)ARRAY_COUNT(formats)) {
                dxgi_format = formats[format];
            }
            if (format == 0) {
                sys_panic(LIT("Tried to create texture with unknown format"));
            }

            enum dx12_resource_view_flags view_flags = DX12_RESOURCE_VIEW_FLAG_NONE;

            D3D12_HEAP_PROPERTIES heap_props = { .Type = D3D12_HEAP_TYPE_DEFAULT };
            heap_props.CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN;
            heap_props.MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN;
            D3D12_HEAP_FLAGS heap_flags = D3D12_HEAP_FLAG_CREATE_NOT_ZEROED;

            D3D12_RESOURCE_DESC desc = ZI;
            desc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
            desc.Alignment = 0;
            desc.Width = size.x;
            desc.Height = size.y;
            desc.DepthOrArraySize = 1;
            desc.MipLevels = 1;
            desc.Format = dxgi_format;
            desc.SampleDesc.Count = 1;
            desc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;

            D3D12_RESOURCE_STATES initial_state = D3D12_RESOURCE_STATE_GENERIC_READ;
            cb->staging_resource = dx12_resource_alloc(heap_props, heap_flags, desc, initial_state, view_flags);
#endif
        }

        /* Create gpu resource */
    }

    /* Copy & submit data */
    /* FIXME */
    (UNUSED)data;

    /* Insert into command list (head of its singly-linked chain) */
    cb->next_in_command_list = cl->first_command_buffer;
    cl->first_command_buffer = cb;
    return cb;
}
/* ========================== * /* ========================== *
* Dispatch * Dispatch
* ========================== */ * ========================== */
@ -1745,6 +1934,7 @@ void gpu_dispatch(struct gpu_dispatch_params params)
struct command_queue *cq = G.cq_direct; struct command_queue *cq = G.cq_direct;
struct command_list *cl = command_list_open(cq); struct command_list *cl = command_list_open(cq);
struct command_buffer *instance_buffer = command_list_push_buffer(cl, STRING_FROM_ARENA(plan->material_instances_arena));
struct command_descriptor_heap *descriptor_heap = command_list_push_descriptor_heap(cl, G.cbv_srv_uav_heap); struct command_descriptor_heap *descriptor_heap = command_list_push_descriptor_heap(cl, G.cbv_srv_uav_heap);
/* Viewport */ /* Viewport */
@ -1807,7 +1997,7 @@ void gpu_dispatch(struct gpu_dispatch_params params)
ID3D12GraphicsCommandList_IASetPrimitiveTopology(cl->cl, D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST); ID3D12GraphicsCommandList_IASetPrimitiveTopology(cl->cl, D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
ID3D12GraphicsCommandList_IASetVertexBuffers(cl->cl, 0, 1, &G.dummy_vertex_buffer_view); ID3D12GraphicsCommandList_IASetVertexBuffers(cl->cl, 0, 1, &G.dummy_vertex_buffer_view);
ID3D12GraphicsCommandList_IASetIndexBuffer(cl->cl, &G.quad_index_buffer_view); ID3D12GraphicsCommandList_IASetIndexBuffer(cl->cl, &G.quad_index_buffer_view);
ID3D12GraphicsCommandList_DrawIndexedInstanced(cl->cl, 6, plan->material_instances->count, 0, 0, 0); ID3D12GraphicsCommandList_DrawIndexedInstanced(cl->cl, 6, instance_buffer->count, 0, 0, 0);
/* Reset render target */ /* Reset render target */
dx12_resource_barrier(cl->cl, target, old_state); dx12_resource_barrier(cl->cl, target, old_state);

View File

@ -246,15 +246,15 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(resource_watch_dispatcher_thread_entry_
sys_mutex_unlock(&watch_dispatcher_lock); sys_mutex_unlock(&watch_dispatcher_lock);
{ {
__profscope(run_resource_watch_callbacks); __profscope(run_resource_watch_callbacks);
struct dict dedup_dict = dict_init(temp.arena, WATCH_DISPATCHER_DEDUP_DICT_BINS); struct dict *dedup_dict = dict_init(temp.arena, WATCH_DISPATCHER_DEDUP_DICT_BINS);
for (struct sys_watch_info *info = watch_info_list.first; info; info = info->next) { for (struct sys_watch_info *info = watch_info_list.first; info; info = info->next) {
/* Do not run callbacks for the same file more than once */ /* Do not run callbacks for the same file more than once */
b32 skip = false; b32 skip = false;
u64 hash = hash_fnv64(HASH_FNV64_BASIS, info->name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, info->name);
if (dict_get(&dedup_dict, hash) == 1) { if (dict_get(dedup_dict, hash) == 1) {
skip = true; skip = true;
} else { } else {
dict_set(temp.arena, &dedup_dict, hash, 1); dict_set(temp.arena, dedup_dict, hash, 1);
} }
if (!skip) { if (!skip) {
struct sys_lock callbacks_lock = sys_mutex_lock_s(G.watch_callbacks_mutex); struct sys_lock callbacks_lock = sys_mutex_lock_s(G.watch_callbacks_mutex);

View File

@ -464,8 +464,8 @@ INTERNAL void test_generate_walls(struct sim_snapshot *world)
/* Dicts containing walls that end on edge of tile chunk, keyed by tile end index. /* Dicts containing walls that end on edge of tile chunk, keyed by tile end index.
* Used to merge walls accross tile chunks. */ * Used to merge walls accross tile chunks. */
struct dict horizontal_ends_dict = dict_init(scratch.arena, 1024); struct dict *horizontal_ends_dict = dict_init(scratch.arena, 1024);
struct dict vertical_ends_dict = dict_init(scratch.arena, 1024); struct dict *vertical_ends_dict = dict_init(scratch.arena, 1024);
struct wall_node *first_wall = NULL; struct wall_node *first_wall = NULL;
@ -519,11 +519,11 @@ INTERNAL void test_generate_walls(struct sim_snapshot *world)
if (wall_start == 0) { if (wall_start == 0) {
u64 start_hash = rand_u64_from_seed(*(u64 *)&start); u64 start_hash = rand_u64_from_seed(*(u64 *)&start);
start_hash = rand_u64_from_seeds(start_hash, wall_dir); start_hash = rand_u64_from_seeds(start_hash, wall_dir);
struct dict_entry *entry = dict_get_entry(&horizontal_ends_dict, start_hash); struct dict_entry *entry = dict_get_entry(horizontal_ends_dict, start_hash);
if (entry) { if (entry) {
/* Existing wall exists accross chunk boundary */ /* Existing wall exists accross chunk boundary */
node = (struct wall_node *)entry->value; node = (struct wall_node *)entry->value;
dict_remove_entry(&horizontal_ends_dict, entry); dict_remove_entry(horizontal_ends_dict, entry);
} }
} }
if (!node) { if (!node) {
@ -537,7 +537,7 @@ INTERNAL void test_generate_walls(struct sim_snapshot *world)
if (wall_end == SIM_TILES_PER_CHUNK_SQRT) { if (wall_end == SIM_TILES_PER_CHUNK_SQRT) {
u64 end_hash = rand_u64_from_seed(*(u64 *)&end); u64 end_hash = rand_u64_from_seed(*(u64 *)&end);
end_hash = rand_u64_from_seeds(end_hash, wall_dir); end_hash = rand_u64_from_seeds(end_hash, wall_dir);
dict_set(scratch.arena, &horizontal_ends_dict, end_hash, (u64)node); dict_set(scratch.arena, horizontal_ends_dict, end_hash, (u64)node);
} }
wall_start = -1; wall_start = -1;
wall_end = -1; wall_end = -1;
@ -609,11 +609,11 @@ INTERNAL void test_generate_walls(struct sim_snapshot *world)
if (wall_start == 0) { if (wall_start == 0) {
u64 start_hash = rand_u64_from_seed(*(u64 *)&start); u64 start_hash = rand_u64_from_seed(*(u64 *)&start);
start_hash = rand_u64_from_seeds(start_hash, wall_dir); start_hash = rand_u64_from_seeds(start_hash, wall_dir);
struct dict_entry *entry = dict_get_entry(&vertical_ends_dict, start_hash); struct dict_entry *entry = dict_get_entry(vertical_ends_dict, start_hash);
if (entry) { if (entry) {
/* Existing wall exists accross chunk boundary */ /* Existing wall exists accross chunk boundary */
node = (struct wall_node *)entry->value; node = (struct wall_node *)entry->value;
dict_remove_entry(&vertical_ends_dict, entry); dict_remove_entry(vertical_ends_dict, entry);
} }
} }
if (!node) { if (!node) {
@ -627,7 +627,7 @@ INTERNAL void test_generate_walls(struct sim_snapshot *world)
if (wall_end == SIM_TILES_PER_CHUNK_SQRT) { if (wall_end == SIM_TILES_PER_CHUNK_SQRT) {
u64 end_hash = rand_u64_from_seed(*(u64 *)&end); u64 end_hash = rand_u64_from_seed(*(u64 *)&end);
end_hash = rand_u64_from_seeds(end_hash, wall_dir); end_hash = rand_u64_from_seeds(end_hash, wall_dir);
dict_set(scratch.arena, &vertical_ends_dict, end_hash, (u64)node); dict_set(scratch.arena, vertical_ends_dict, end_hash, (u64)node);
} }
wall_start = -1; wall_start = -1;
wall_end = -1; wall_end = -1;

View File

@ -463,7 +463,7 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
span->start = ase_span->start; span->start = ase_span->start;
span->end = ase_span->end; span->end = ase_span->end;
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
dict_set(arena, &sheet.spans_dict, hash, (u64)span); dict_set(arena, sheet.spans_dict, hash, (u64)span);
++index; ++index;
} }
} }
@ -494,15 +494,15 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
u64 num_temp_slice_group_nodes = 0; u64 num_temp_slice_group_nodes = 0;
struct temp_slice_group_node *temp_slice_group_head = NULL; struct temp_slice_group_node *temp_slice_group_head = NULL;
{ {
struct dict temp_slice_dict = dict_init(scratch.arena, (u64)(ase.num_slice_keys * 2)); struct dict *temp_slice_dict = dict_init(scratch.arena, (u64)(ase.num_slice_keys * 2));
for (struct ase_slice_key *ase_slice_key = ase.slice_key_head; ase_slice_key; ase_slice_key = ase_slice_key->next) { for (struct ase_slice_key *ase_slice_key = ase.slice_key_head; ase_slice_key; ase_slice_key = ase_slice_key->next) {
struct string name = ase_slice_key->name; struct string name = ase_slice_key->name;
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
struct temp_slice_group_node *temp_slice_group_node = (struct temp_slice_group_node *)dict_get(&temp_slice_dict, hash); struct temp_slice_group_node *temp_slice_group_node = (struct temp_slice_group_node *)dict_get(temp_slice_dict, hash);
if (!temp_slice_group_node) { if (!temp_slice_group_node) {
temp_slice_group_node = arena_push(scratch.arena, struct temp_slice_group_node); temp_slice_group_node = arena_push(scratch.arena, struct temp_slice_group_node);
temp_slice_group_node->name = name; temp_slice_group_node->name = name;
dict_set(scratch.arena, &temp_slice_dict, hash, (u64)temp_slice_group_node); dict_set(scratch.arena, temp_slice_dict, hash, (u64)temp_slice_group_node);
++num_temp_slice_group_nodes; ++num_temp_slice_group_nodes;
temp_slice_group_node->next = temp_slice_group_head; temp_slice_group_node->next = temp_slice_group_head;
@ -586,7 +586,7 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
temp_slice_group_node->final_slice_group = slice_group; temp_slice_group_node->final_slice_group = slice_group;
u64 hash = hash_fnv64(HASH_FNV64_BASIS, slice_group->name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, slice_group->name);
dict_set(arena, &sheet.slice_groups_dict, hash, (u64)slice_group); dict_set(arena, sheet.slice_groups_dict, hash, (u64)slice_group);
++index; ++index;
} }
@ -635,7 +635,7 @@ INTERNAL struct sprite_sheet init_sheet_from_ase_result(struct arena *arena, str
struct string point_slice_name = ray_slice_name; struct string point_slice_name = ray_slice_name;
point_slice_name.len -= ray_suffix.len; point_slice_name.len -= ray_suffix.len;
u64 hash = hash_fnv64(HASH_FNV64_BASIS, point_slice_name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, point_slice_name);
struct sprite_sheet_slice_group *point_slice_group = (struct sprite_sheet_slice_group *)dict_get(&sheet.slice_groups_dict, hash); struct sprite_sheet_slice_group *point_slice_group = (struct sprite_sheet_slice_group *)dict_get(sheet.slice_groups_dict, hash);
if (point_slice_group) { if (point_slice_group) {
u32 point_slices_per_frame = point_slice_group->per_frame_count; u32 point_slices_per_frame = point_slice_group->per_frame_count;
@ -1096,7 +1096,7 @@ struct sprite_sheet_span sprite_sheet_get_span(struct sprite_sheet *sheet, struc
struct sprite_sheet_span res = ZI; struct sprite_sheet_span res = ZI;
if (sheet->spans_count > 0) { if (sheet->spans_count > 0) {
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
struct sprite_sheet_span *entry = (struct sprite_sheet_span *)dict_get(&sheet->spans_dict, hash); struct sprite_sheet_span *entry = (struct sprite_sheet_span *)dict_get(sheet->spans_dict, hash);
if (entry) { if (entry) {
res = *entry; res = *entry;
} }
@ -1108,7 +1108,7 @@ struct sprite_sheet_slice sprite_sheet_get_slice(struct sprite_sheet *sheet, str
{ {
if (sheet->slice_groups_count > 0) { if (sheet->slice_groups_count > 0) {
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
struct sprite_sheet_slice_group *group = (struct sprite_sheet_slice_group *)dict_get(&sheet->slice_groups_dict, hash); struct sprite_sheet_slice_group *group = (struct sprite_sheet_slice_group *)dict_get(sheet->slice_groups_dict, hash);
if (group) { if (group) {
return group->frame_slices[frame_index * group->per_frame_count]; return group->frame_slices[frame_index * group->per_frame_count];
} }
@ -1134,7 +1134,7 @@ struct sprite_sheet_slice_array sprite_sheet_get_slices(struct sprite_sheet *she
struct sprite_sheet_slice_array res = ZI; struct sprite_sheet_slice_array res = ZI;
if (sheet->slice_groups_count > 0) { if (sheet->slice_groups_count > 0) {
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
struct sprite_sheet_slice_group *group = (struct sprite_sheet_slice_group *)dict_get(&sheet->slice_groups_dict, hash); struct sprite_sheet_slice_group *group = (struct sprite_sheet_slice_group *)dict_get(sheet->slice_groups_dict, hash);
if (group) { if (group) {
res.count = group->per_frame_count; res.count = group->per_frame_count;
res.slices = &group->frame_slices[frame_index * group->per_frame_count]; res.slices = &group->frame_slices[frame_index * group->per_frame_count];

View File

@ -72,11 +72,11 @@ struct sprite_sheet {
u32 spans_count; u32 spans_count;
struct sprite_sheet_span *spans; struct sprite_sheet_span *spans;
struct dict spans_dict; struct dict *spans_dict;
u32 slice_groups_count; u32 slice_groups_count;
struct sprite_sheet_slice_group *slice_groups; struct sprite_sheet_slice_group *slice_groups;
struct dict slice_groups_dict; struct dict *slice_groups_dict;
}; };
struct sprite_sheet *sprite_sheet_from_tag_await(struct sprite_scope *scope, struct sprite_tag tag); struct sprite_sheet *sprite_sheet_from_tag_await(struct sprite_scope *scope, struct sprite_tag tag);

View File

@ -133,7 +133,7 @@ struct tar_archive tar_parse(struct arena *arena, struct string data, struct str
archive.lookup = dict_init(arena, (u64)((f64)num_files * ARCHIVE_LOOKUP_TABLE_CAPACITY_FACTOR)); archive.lookup = dict_init(arena, (u64)((f64)num_files * ARCHIVE_LOOKUP_TABLE_CAPACITY_FACTOR));
for (struct tar_entry *entry = archive.head; entry; entry = entry->next) { for (struct tar_entry *entry = archive.head; entry; entry = entry->next) {
u64 hash = hash_fnv64(HASH_FNV64_BASIS, entry->file_name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, entry->file_name);
dict_set(arena, &archive.lookup, hash, (u64)entry); dict_set(arena, archive.lookup, hash, (u64)entry);
} }
/* Build hierarchy */ /* Build hierarchy */
@ -147,7 +147,7 @@ struct tar_archive tar_parse(struct arena *arena, struct string data, struct str
for (struct string parent_dir_name = entry->file_name; parent_dir_name.len > 0; --parent_dir_name.len) { for (struct string parent_dir_name = entry->file_name; parent_dir_name.len > 0; --parent_dir_name.len) {
if (parent_dir_name.text[parent_dir_name.len - 1] == '/') { if (parent_dir_name.text[parent_dir_name.len - 1] == '/') {
u64 hash = hash_fnv64(HASH_FNV64_BASIS, parent_dir_name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, parent_dir_name);
parent_entry = (struct tar_entry *)dict_get(&archive.lookup, hash); parent_entry = (struct tar_entry *)dict_get(archive.lookup, hash);
break; break;
} }
} }
@ -165,5 +165,5 @@ struct tar_archive tar_parse(struct arena *arena, struct string data, struct str
struct tar_entry *tar_get(struct tar_archive *archive, struct string name) struct tar_entry *tar_get(struct tar_archive *archive, struct string name)
{ {
u64 hash = hash_fnv64(HASH_FNV64_BASIS, name); u64 hash = hash_fnv64(HASH_FNV64_BASIS, name);
return (struct tar_entry *)dict_get(&archive->lookup, hash); return (struct tar_entry *)dict_get(archive->lookup, hash);
} }

View File

@ -13,7 +13,7 @@ struct tar_entry {
}; };
struct tar_archive { struct tar_archive {
struct dict lookup; struct dict *lookup;
struct tar_entry *head; struct tar_entry *head;
}; };

View File

@ -127,12 +127,12 @@ struct dict {
struct dict_entry *last; struct dict_entry *last;
}; };
INLINE struct dict dict_init(struct arena *arena, u64 bins_count) INLINE struct dict *dict_init(struct arena *arena, u64 bins_count)
{ {
__prof; __prof;
struct dict dict = ZI; struct dict *dict = arena_push(arena, struct dict);
dict.bins_count = max_u64(bins_count, 1); /* Ensure at least 1 bin */ dict->bins_count = max_u64(bins_count, 1); /* Ensure at least 1 bin */
dict.bins = arena_push_array(arena, struct dict_bin, dict.bins_count); dict->bins = arena_push_array(arena, struct dict_bin, dict->bins_count);
return dict; return dict;
} }