diff --git a/src/gp_dx12.c b/src/gp_dx12.c index 9f5da069..3a956836 100644 --- a/src/gp_dx12.c +++ b/src/gp_dx12.c @@ -117,26 +117,34 @@ struct pipeline_scope { }; struct command_queue { D3D12_COMMAND_LIST_TYPE type; ID3D12CommandQueue *cq; struct arena *arena; - struct sys_mutex *submitted_command_lists_mutex; - struct command_list *first_submitted_command_list; - struct command_list *last_submitted_command_list; struct sys_mutex *submit_fence_mutex; u64 submit_fence_target; ID3D12Fence *submit_fence; + struct command_list_pool *cl_pool; + #if PROFILING struct __prof_dx12_ctx *prof; #endif }; +struct command_list_pool { + struct command_queue *cq; + struct arena *arena; + struct sys_mutex *mutex; + struct command_list *first_submitted_command_list; + struct command_list *last_submitted_command_list; +}; + struct command_list { struct command_queue *cq; + struct command_list_pool *pool; struct ID3D12CommandAllocator *ca; struct ID3D12GraphicsCommandList *cl; struct sys_lock global_record_lock; @@ -449,10 +457,9 @@ INTERNAL void *handle_get_data_locked(struct gp_handle handle, enum handle_kind if (handle.gen) { struct handle_entry *entry = handle_get_entry(handle, lock); data = entry->data; -#if RTC /* Handle should match expected kind */ + (UNUSED)kind; ASSERT(entry->kind == kind); -#endif } return data; } @@ -1390,17 +1397,17 @@ INTERNAL struct descriptor *descriptor_alloc(struct cpu_descriptor_heap *dh) struct sys_lock lock = sys_mutex_lock_e(dh->mutex); if (dh->first_free_descriptor) { d = dh->first_free_descriptor; + dh->first_free_descriptor = d->next_free; handle = d->handle; index = d->index; } else { if (dh->num_descriptors_reserved >= dh->num_descriptors_capacity) { sys_panic(LIT("Max descriptors reached in heap")); } d = arena_push_no_zero(dh->arena, struct descriptor); - index = dh->num_descriptors_reserved; + index = dh->num_descriptors_reserved++; handle.ptr = dh->handle.ptr + (index * dh->descriptor_size); - ++dh->num_descriptors_reserved; }
sys_mutex_unlock(&lock); } @@ -1711,6 +1715,8 @@ INTERNAL enum D3D12_RESOURCE_STATES dx12_resource_barrier(ID3D12GraphicsCommandL * Command queue * ========================== */ +INTERNAL struct command_list_pool *command_list_pool_alloc(struct command_queue *cq); + INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE type, enum D3D12_COMMAND_QUEUE_PRIORITY priority, struct string dbg_name) { __prof; @@ -1720,9 +1726,6 @@ INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE cq = arena_push(arena, struct command_queue); cq->arena = arena; } - cq->type = type; - cq->submitted_command_lists_mutex = sys_mutex_alloc(); - cq->submit_fence_mutex = sys_mutex_alloc(); D3D12_COMMAND_QUEUE_DESC desc = ZI; desc.Flags = D3D12_COMMAND_QUEUE_FLAG_NONE; @@ -1741,6 +1744,9 @@ INTERNAL struct command_queue *command_queue_alloc(enum D3D12_COMMAND_LIST_TYPE __prof_dx12_ctx_alloc(cq->prof, G.device, cq->cq, dbg_name.text, dbg_name.len); (UNUSED)dbg_name; + cq->type = type; + cq->submit_fence_mutex = sys_mutex_alloc(); + cq->cl_pool = command_list_pool_alloc(cq); return cq; } @@ -1756,18 +1762,32 @@ INTERNAL void command_queue_release(struct command_queue *cq) * Command list * ========================== */ -INTERNAL struct command_list *command_list_open(struct command_queue *cq) +INTERNAL struct command_list_pool *command_list_pool_alloc(struct command_queue *cq) +{ + struct command_list_pool *pool = NULL; + { + struct arena *arena = arena_alloc(GIGABYTE(64)); + pool = arena_push(arena, struct command_list_pool); + pool->arena = arena; + } + pool->cq = cq; + pool->mutex = sys_mutex_alloc(); + return pool; +} + +INTERNAL struct command_list *command_list_open(struct command_list_pool *pool) { __prof; + struct command_queue *cq = pool->cq; u64 completed_fence_value = ID3D12Fence_GetCompletedValue(cq->submit_fence); struct command_list *cl = NULL; struct ID3D12GraphicsCommandList *old_cl = NULL; struct ID3D12CommandAllocator *old_ca = 
NULL; { - struct sys_lock lock = sys_mutex_lock_e(cq->submitted_command_lists_mutex); + struct sys_lock lock = sys_mutex_lock_e(pool->mutex); /* Find first command list ready for reuse */ - for (struct command_list *tmp = cq->first_submitted_command_list; tmp; tmp = tmp->next_submitted) { + for (struct command_list *tmp = pool->first_submitted_command_list; tmp; tmp = tmp->next_submitted) { if (completed_fence_value >= tmp->submitted_fence_target) { cl = tmp; break; @@ -1782,24 +1802,24 @@ INTERNAL struct command_list *command_list_open(struct command_queue *cq) if (prev) { prev->next_submitted = next; } else { - cq->first_submitted_command_list = next; + pool->first_submitted_command_list = next; } if (next) { next->prev_submitted = prev; } else { - cq->last_submitted_command_list = prev; + pool->last_submitted_command_list = prev; } } else { - cl = arena_push_no_zero(cq->arena, struct command_list); + cl = arena_push_no_zero(pool->arena, struct command_list); } sys_mutex_unlock(&lock); } MEMZERO_STRUCT(cl); cl->cq = cq; + cl->pool = pool; cl->global_record_lock = sys_mutex_lock_s(G.global_command_list_record_mutex); HRESULT hr = 0; - /* FIXME: Determine command list type from command queue */ if (old_cl) { cl->cl = old_cl; cl->ca = old_ca; @@ -1839,6 +1859,7 @@ INTERNAL u64 command_list_close(struct command_list *cl) { __prof; struct command_queue *cq = cl->cq; + struct command_list_pool *pool = cl->pool; /* Close */ { @@ -1898,17 +1919,17 @@ INTERNAL u64 command_list_close(struct command_list *cl) sys_mutex_unlock(&lock); } - /* Add command list to submitted list */ + /* Add command list to pool submitted list */ sys_mutex_unlock(&cl->global_record_lock); cl->submitted_fence_target = submit_fence_target; { - struct sys_lock lock = sys_mutex_lock_e(cq->submitted_command_lists_mutex); - if (cq->last_submitted_command_list) { - cq->last_submitted_command_list->next_submitted = cl; + struct sys_lock lock = sys_mutex_lock_e(pool->mutex); + if 
(pool->last_submitted_command_list) { + pool->last_submitted_command_list->next_submitted = cl; } else { - cq->first_submitted_command_list = cl; + pool->first_submitted_command_list = cl; } - cq->last_submitted_command_list = cl; + pool->last_submitted_command_list = cl; sys_mutex_unlock(&lock); } @@ -2313,7 +2334,7 @@ struct gp_handle gp_texture_alloc(enum gp_texture_format format, u32 flags, stru /* Copy from upload heap to texture */ struct command_queue *cq = G.cq_copy_background; - struct command_list *cl = command_list_open(cq); + struct command_list *cl = command_list_open(cq->cl_pool); { __profscope_dx12(cl->cq->prof, cl->cl, Upload texture, RGB32_F(0.2, 0.5, 0.2)); D3D12_TEXTURE_COPY_LOCATION dst_loc = { @@ -2380,7 +2401,7 @@ void gp_dispatch(struct gp_dispatch_params params) struct pipeline_scope *pipeline_scope = pipeline_scope_begin(); struct pipeline *material_pipeline = pipeline_from_name(pipeline_scope, LIT("material")); struct pipeline *shape_pipeline = pipeline_from_name(pipeline_scope, LIT("shape")); - struct command_list *cl = command_list_open(G.cq_direct); + struct command_list *cl = command_list_open(G.cq_direct->cl_pool); { __profscope_dx12(cl->cq->prof, cl->cl, Dispatch, RGB32_F(0.5, 0.2, 0.2)); struct dx12_resource *target = handle_get_data(params.draw_target, DX12_HANDLE_KIND_RESOURCE); @@ -2699,7 +2720,7 @@ INTERNAL void present_blit(struct swapchain_buffer *dst, struct dx12_resource *s struct pipeline_scope *pipeline_scope = pipeline_scope_begin(); struct pipeline *blit_pipeline = pipeline_from_name(pipeline_scope, LIT("blit")); if (blit_pipeline->success) { - struct command_list *cl = command_list_open(G.cq_direct); + struct command_list *cl = command_list_open(G.cq_direct->cl_pool); { __profscope_dx12(cl->cq->prof, cl->cl, Blit, RGB32_F(0.5, 0.2, 0.2)); struct swapchain *swapchain = dst->swapchain; @@ -2781,7 +2802,6 @@ INTERNAL void present_blit(struct swapchain_buffer *dst, struct dx12_resource *s 
ID3D12GraphicsCommandList_ResourceBarrier(cl->cl, 1, &rb); dst->state = rtb.StateAfter; } - ID3D12GraphicsCommandList_OMSetRenderTargets(cl->cl, 1, &dst->rtv_descriptor->handle, false, NULL); } command_list_close(cl); }