diff --git a/src/gpu_dx12.c b/src/gpu_dx12.c index 11ab90ff..3db1840d 100644 --- a/src/gpu_dx12.c +++ b/src/gpu_dx12.c @@ -43,29 +43,29 @@ # define DX12_SHADER_DEBUG 0 #endif -enum dx12_pipeline_desc_flags { - DX12_PIPELINE_DESC_FLAG_NONE = 0, - DX12_PIPELINE_DESC_FLAG_VS = (1 << 0), - DX12_PIPELINE_DESC_FLAG_PS = (1 << 1) +enum pipeline_desc_flags { + PIPELINE_DESC_FLAG_NONE = 0, + PIPELINE_DESC_FLAG_VS = (1 << 0), + PIPELINE_DESC_FLAG_PS = (1 << 1) }; -struct dx12_pipeline_desc { +struct pipeline_desc { char *shader; u32 flags; }; -struct dx12_pipeline { - struct dx12_pipeline_desc desc; +struct pipeline { + struct pipeline_desc desc; ID3D12PipelineState *pso; }; -struct dx12_pipeline_result { - struct dx12_pipeline pipeline; +struct pipeline_result { + struct pipeline pipeline; u64 errors_text_len; u8 errors_text[KILOBYTE(16)]; }; -struct dx12_pipeline_error { +struct pipeline_error { struct string msg; }; @@ -85,7 +85,7 @@ struct dx12_handle_entry { struct dx12_handle_entry *next_free; }; -struct dx12_texture { +struct dx12_buffer { i32 _; }; @@ -172,7 +172,7 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(gpu_shutdown) * Handle * ========================== */ -INTERNAL void dx12_texture_release(struct dx12_texture *t); +INTERNAL void dx12_buffer_release(struct dx12_buffer *t); INTERNAL struct gpu_handle handle_alloc(enum dx12_handle_kind kind, void *data) { @@ -246,7 +246,7 @@ void gpu_release(struct gpu_handle handle) case DX12_HANDLE_KIND_TEXTURE: { - dx12_texture_release(data); + dx12_buffer_release(data); } break; } } @@ -528,12 +528,12 @@ INTERNAL void dx12_init_base(struct sys_window *window) /* ============= */ /* Texture pipeline */ -PACK(struct dx12_texture_pipeline_uniform { +PACK(struct dx12_buffer_pipeline_uniform { struct mat4x4 vp; u32 instance_offset; }); -PACK(struct dx12_texture_pipeline_instance { +PACK(struct dx12_buffer_pipeline_instance { struct xform xf; struct v2 uv0; struct v2 uv1; @@ -547,29 +547,29 @@ PACK(struct dx12_texture_pipeline_instance 
{ /* ============= */ /* Init pipelines */ -INTERNAL struct dx12_pipeline_result *pipeline_alloc_from_descs(struct arena *arena, u64 num_pipelines, struct dx12_pipeline_desc *descs); -INTERNAL void dx12_pipeline_release(struct dx12_pipeline *pipeline); +INTERNAL struct pipeline_result *pipeline_alloc_from_descs(struct arena *arena, u64 num_pipelines, struct pipeline_desc *descs); +INTERNAL void pipeline_release(struct pipeline *pipeline); INTERNAL void dx12_init_pipelines(void) { __prof; struct arena_temp scratch = scratch_begin_no_conflict(); - struct dx12_pipeline_desc pipeline_descs[] = { + struct pipeline_desc pipeline_descs[] = { /* Texture pipeline */ { .shader = "shaders/texture.hlsl", - .flags = DX12_PIPELINE_DESC_FLAG_VS | - DX12_PIPELINE_DESC_FLAG_PS + .flags = PIPELINE_DESC_FLAG_VS | + PIPELINE_DESC_FLAG_PS } }; - struct dx12_pipeline_result *results = pipeline_alloc_from_descs(scratch.arena, ARRAY_COUNT(pipeline_descs), pipeline_descs); + struct pipeline_result *results = pipeline_alloc_from_descs(scratch.arena, ARRAY_COUNT(pipeline_descs), pipeline_descs); for (u64 i = 0; i < ARRAY_COUNT(pipeline_descs); ++i) { - struct dx12_pipeline_result *result = &results[i]; + struct pipeline_result *result = &results[i]; if (result->errors_text_len > 0) { struct string msg = STRING(result->errors_text_len, result->errors_text); sys_panic(msg); - dx12_pipeline_release(&result->pipeline); + pipeline_release(&result->pipeline); } else { /* TODO */ } @@ -585,7 +585,7 @@ INTERNAL void dx12_init_pipelines(void) struct dx12_include_handler { ID3DInclude d3d_handler; ID3DIncludeVtbl vtbl; - struct dx12_pipeline *pipeline; + struct pipeline *pipeline; b32 has_open_resource; struct resource res; }; @@ -634,7 +634,7 @@ INTERNAL HRESULT dx12_include_close(ID3DInclude *d3d_handler, LPCVOID data) return S_OK; } -INTERNAL struct dx12_include_handler dx12_include_handler_alloc(struct dx12_pipeline *pipeline) +INTERNAL struct dx12_include_handler dx12_include_handler_alloc(struct 
pipeline *pipeline) { struct dx12_include_handler handler = ZI; handler.d3d_handler.lpVtbl = &handler.vtbl; @@ -652,9 +652,6 @@ INTERNAL void dx12_include_handler_release(struct dx12_include_handler *handler) } } - /* TODO: Compile shaders offline w/ dxc. - * Will also allow for some hlsl language features like static_assert */ - enum shader_compile_task_kind { SHADER_COMPILE_TASK_KIND_VS, SHADER_COMPILE_TASK_KIND_PS @@ -663,7 +660,7 @@ enum shader_compile_task_kind { struct shader_compile_task_arg { /* In */ enum shader_compile_task_kind kind; - struct dx12_pipeline *pipeline; + struct pipeline *pipeline; struct resource *src_res; /* Out */ @@ -672,11 +669,12 @@ struct shader_compile_task_arg { ID3DBlob *error_blob; }; +/* TODO: Compile shaders offline w/ dxc for performance & language features like static_assert */ INTERNAL WORK_TASK_FUNC_DEF(shader_compile_task, comp_arg_raw) { __prof; struct shader_compile_task_arg *comp_arg = (struct shader_compile_task_arg *)comp_arg_raw; - struct dx12_pipeline *pipeline = comp_arg->pipeline; + struct pipeline *pipeline = comp_arg->pipeline; struct string shader_name = string_from_cstr_no_limit(pipeline->desc.shader); enum shader_compile_task_kind kind = comp_arg->kind; struct resource *src_res = comp_arg->src_res; @@ -775,17 +773,17 @@ INTERNAL WORK_TASK_FUNC_DEF(shader_compile_task, comp_arg_raw) * ========================== */ struct pipeline_load_task_arg { - struct dx12_pipeline *pipeline; - struct dx12_pipeline_result *result; + struct pipeline *pipeline; + struct pipeline_result *result; }; INTERNAL WORK_TASK_FUNC_DEF(pipeline_load_task, load_arg_raw) { __prof; struct pipeline_load_task_arg *load_arg = (struct pipeline_load_task_arg *)load_arg_raw; - struct dx12_pipeline *pipeline = load_arg->pipeline; - struct dx12_pipeline_desc desc = pipeline->desc; - struct dx12_pipeline_result *result = load_arg->result; + struct pipeline *pipeline = load_arg->pipeline; + struct pipeline_desc desc = pipeline->desc; + struct 
pipeline_result *result = load_arg->result; struct arena_temp scratch = scratch_begin_no_conflict(); { @@ -804,19 +802,19 @@ INTERNAL WORK_TASK_FUNC_DEF(pipeline_load_task, load_arg_raw) ps.src_res = &src_res; ps.pipeline = pipeline; + /* Compile shaders */ struct work_slate ws = work_slate_begin(); - if (desc.flags & DX12_PIPELINE_DESC_FLAG_VS) { + if (desc.flags & PIPELINE_DESC_FLAG_VS) { work_slate_push_task(&ws, shader_compile_task, &vs); } - if (desc.flags & DX12_PIPELINE_DESC_FLAG_PS) { + if (desc.flags & PIPELINE_DESC_FLAG_PS) { work_slate_push_task(&ws, shader_compile_task, &ps); } struct work_handle work = work_slate_end_and_help(&ws, WORK_PRIORITY_NORMAL); work_wait(work); - b32 success = vs.success && ps.success; - /* FIXME: Validate root signature blob exists in bytecode */ + b32 success = vs.success && ps.success; /* Create PSO */ ID3D12PipelineState *pso = NULL; @@ -922,19 +920,18 @@ INTERNAL WORK_TASK_FUNC_DEF(pipeline_load_task, load_arg_raw) scratch_end(scratch); } -INTERNAL struct dx12_pipeline_result *pipeline_alloc_from_descs(struct arena *arena, u64 num_pipelines, struct dx12_pipeline_desc *descs) +INTERNAL struct pipeline_result *pipeline_alloc_from_descs(struct arena *arena, u64 num_pipelines, struct pipeline_desc *descs) { __prof; - struct dx12_pipeline_result *results = arena_push_array(arena, struct dx12_pipeline_result, num_pipelines); + struct pipeline_result *results = arena_push_array(arena, struct pipeline_result, num_pipelines); struct pipeline_load_task_arg *task_args = arena_push_array(arena, struct pipeline_load_task_arg, num_pipelines); - struct sys_mutex arena_mutex = sys_mutex_alloc(); - /* Create & dispatch work */ + /* Load pipelines */ struct work_slate ws = work_slate_begin(); for (u64 i = 0; i < num_pipelines; ++i) { - struct dx12_pipeline_result *result = &results[i]; + struct pipeline_result *result = &results[i]; - struct dx12_pipeline *pipeline = &results->pipeline; + struct pipeline *pipeline = &results[i].pipeline; 
pipeline->desc = descs[i]; struct pipeline_load_task_arg *arg = &task_args[i]; @@ -946,11 +943,10 @@ INTERNAL struct dx12_pipeline_result *pipeline_alloc_from_descs(struct arena *ar struct work_handle work = work_slate_end_and_help(&ws, WORK_PRIORITY_NORMAL); work_wait(work); - sys_mutex_release(&arena_mutex); return results; } -INTERNAL void dx12_pipeline_release(struct dx12_pipeline *pipeline) +INTERNAL void pipeline_release(struct pipeline *pipeline) { __prof; if (pipeline->pso) { @@ -959,21 +955,26 @@ INTERNAL void dx12_pipeline_release(struct dx12_pipeline *pipeline) } /* ========================== * - * Texture + * Buffer * ========================== */ -INTERNAL void dx12_texture_release(struct dx12_texture *t) +INTERNAL void dx12_buffer_release(struct dx12_buffer *t) { (UNUSED)t; } +/* ========================== * + * Texture + * ========================== */ + + struct gpu_handle gpu_texture_alloc(enum gpu_texture_format format, u32 flags, struct v2i32 size, void *initial_data) { (UNUSED)format; (UNUSED)flags; (UNUSED)size; (UNUSED)initial_data; - struct dx12_texture *t = NULL; + struct dx12_buffer *t = NULL; return handle_alloc(DX12_HANDLE_KIND_TEXTURE, t); }