remove gp_handle, use raw pointer

This commit is contained in:
jacob 2025-06-27 01:29:36 -05:00
parent 83e10a3fd5
commit 2f8ca7c2f5
10 changed files with 51 additions and 203 deletions

View File

@ -3,7 +3,7 @@
#include "memory.h"
#define ARENA_HEADER_SIZE 64
#define ARENA_HEADER_SIZE 256
#define ARENA_BLOCK_SIZE 4096
#define arena_push(a, type) ((type *)arena_push_bytes((a), sizeof(type), alignof(type)))

View File

@ -7,7 +7,7 @@
#include "collider.h"
GLOBAL struct {
struct gp_handle solid_white_texture;
struct gp_resource *solid_white_texture;
} G = ZI, DEBUG_ALIAS(G, G_draw);
/* ========================== *

View File

@ -22,7 +22,7 @@ struct draw_startup_receipt draw_startup(struct gp_startup_receipt *gp_sr,
struct draw_texture_params {
struct xform xf;
struct gp_handle texture; /* Overrides sprite if set */
struct gp_resource *texture; /* Overrides sprite if set */
struct sprite_tag sprite;
struct clip_rect clip;
u32 tint;

View File

@ -119,7 +119,7 @@ INTERNAL WORK_TASK_FUNC_DEF(font_load_asset_task, vparams)
resource_close(&res);
/* Send texture to GPU */
struct gp_handle texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2I32(result.image_data.width, result.image_data.height), result.image_data.pixels);
struct gp_resource *texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2I32(result.image_data.width, result.image_data.height), result.image_data.pixels);
/* Allocate store memory */
struct font *font = NULL;

View File

@ -21,7 +21,7 @@ struct font_glyph {
};
struct font {
struct gp_handle texture;
struct gp_resource *texture;
u32 image_width;
u32 image_height;
f32 point_size;

View File

@ -12,31 +12,16 @@ struct gp_startup_receipt { i32 _; };
struct gp_startup_receipt gp_startup(struct work_startup_receipt *work_sr);
/* ========================== *
* Handle
* Resource
* ========================== */
struct gp_handle {
union {
/* dx11 style */
u64 v;
/* dx12 style */
struct {
u64 gen;
u64 idx;
};
};
};
struct gp_handle_array {
u64 count;
struct gp_handle **handles;
};
struct gp_resource;
/* NOTE: Internally, the layer will make sure to not release any resources
* until after the GPU finishes using them. However, it is up to the caller
* to make sure the released resources aren't referenced in any flows before
* dispatching. */
void gp_release(struct gp_handle handle);
* to make sure the released resources aren't then referenced in any flow
* dispatches. */
void gp_resource_release(struct gp_resource *resource);
/* ========================== *
* Texture
@ -55,9 +40,9 @@ enum gp_texture_flag {
GP_TEXTURE_FLAG_TARGETABLE = (1 << 0)
};
struct gp_handle gp_texture_alloc(enum gp_texture_format format, u32 flags, struct v2i32 size, void *initial_data);
struct gp_resource *gp_texture_alloc(enum gp_texture_format format, u32 flags, struct v2i32 size, void *initial_data);
struct v2i32 gp_texture_get_size(struct gp_handle texture);
struct v2i32 gp_texture_get_size(struct gp_resource *texture);
/* ========================== *
* Flow
@ -83,7 +68,7 @@ struct gp_cmd_desc {
struct {
struct xform xf;
struct sprite_tag sprite;
struct gp_handle texture;
struct gp_resource *texture;
struct clip_rect clip;
u32 tint;
f32 emittance;
@ -109,7 +94,7 @@ struct gp_cmd_desc {
struct gp_dispatch_params {
struct gp_flow *flow;
struct gp_handle draw_target;
struct gp_resource *draw_target;
struct rect draw_target_viewport;
struct xform draw_target_view;
b32 clear_target;
@ -142,6 +127,6 @@ struct gp_memory_info gp_query_memory_info(void);
/* 1. Clears the backbuffer and ensures it's at size `backbuffer_resolution`
* 2. Blits `texture` to the backbuffer using `texture_xf` (applied to centered unit square)
* 3. Presents the backbuffer */
void gp_present(struct sys_window *window, struct v2i32 backbuffer_resolution, struct gp_handle texture, struct xform texture_xf, i32 vsync);
void gp_present(struct sys_window *window, struct v2i32 backbuffer_resolution, struct gp_resource *texture, struct xform texture_xf, i32 vsync);
#endif

View File

@ -174,7 +174,7 @@ struct command_descriptor_heap {
D3D12_DESCRIPTOR_HEAP_TYPE type;
ID3D12DescriptorHeap *heap;
D3D12_CPU_DESCRIPTOR_HANDLE cpu_handle;
D3D12_GPU_DESCRIPTOR_HANDLE gp_handle;
D3D12_GPU_DESCRIPTOR_HANDLE gpu_handle;
struct command_descriptor_heap *next_in_command_list;
@ -268,33 +268,11 @@ struct fenced_release_data {
void *ptr;
};
enum handle_kind {
DX12_HANDLE_KIND_NONE,
DX12_HANDLE_KIND_RESOURCE,
NUM_DX12_HANDLE_KINDS
};
struct handle_entry {
enum handle_kind kind;
u64 gen;
u64 idx;
void *data;
struct handle_entry *next_free;
};
/* ========================== *
* Global state
* ========================== */
GLOBAL struct {
/* Handles pool */
struct sys_mutex *handle_entries_mutex;
struct arena *handle_entries_arena;
struct handle_entry *first_free_handle_entry;
u64 num_handle_entries_reserved;
/* Descriptor heaps pool */
struct sys_mutex *command_descriptor_heaps_mutex;
struct arena *command_descriptor_heaps_arena;
@ -377,9 +355,6 @@ struct gp_startup_receipt gp_startup(struct work_startup_receipt *work_sr)
{
__prof;
(UNUSED)work_sr;
/* Initialize handles pool */
G.handle_entries_mutex = sys_mutex_alloc();
G.handle_entries_arena = arena_alloc(GIGABYTE(64));
/* Initialize command descriptor heaps pool */
G.command_descriptor_heaps_mutex = sys_mutex_alloc();
@ -444,113 +419,6 @@ INTERNAL APP_EXIT_CALLBACK_FUNC_DEF(gp_shutdown)
sys_thread_wait_release(G.evictor_thread);
}
/* ========================== *
* Handle
* ========================== */
/* Allocates a generational handle mapping to `data`.
 * Pops a recycled entry from the free list when one exists (its generation is
 * bumped so stale handles to the old occupant stop resolving); otherwise a new
 * slot is reserved from the handle arena. Lookup/free-list state is guarded by
 * G.handle_entries_mutex. */
INTERNAL struct gp_handle handle_alloc(enum handle_kind kind, void *data)
{
    u64 old_gen = 0;
    u64 idx = 0;
    struct handle_entry *entry = NULL;
    {
        struct sys_lock lock = sys_mutex_lock_e(G.handle_entries_mutex);
        if (G.first_free_handle_entry) {
            /* Reuse a freed slot; remember its previous generation and index. */
            entry = G.first_free_handle_entry;
            G.first_free_handle_entry = entry->next_free;
            old_gen = entry->gen;
            idx = entry->idx;
        } else {
            /* Free list empty: append a fresh slot to the handle arena. */
            entry = arena_push_no_zero(G.handle_entries_arena, struct handle_entry);
            idx = G.num_handle_entries_reserved++;
        }
        sys_mutex_unlock(&lock);
    }
    /* NOTE(review): the entry is (re)initialized outside the lock — assumes no
     * other thread can observe it before the returned handle is published.
     * TODO confirm that is the intended contract. */
    MEMZERO_STRUCT(entry);
    entry->kind = kind;
    entry->gen = old_gen + 1; /* generation bump invalidates any stale handles */
    entry->idx = idx;
    entry->data = data;
    struct gp_handle res = ZI;
    res.gen = entry->gen;
    res.idx = entry->idx;
    return res;
}
/* Looks up the live entry for `handle`, or NULL if the handle's slot index is
 * out of range or its generation no longer matches (i.e. the handle is stale).
 * Caller must already hold G.handle_entries_mutex (asserted via `lock`). */
INTERNAL struct handle_entry *handle_get_entry(struct gp_handle handle, struct sys_lock *lock)
{
    sys_assert_locked_e_or_s(lock, G.handle_entries_mutex);
    if (handle.idx == 0 || handle.idx >= G.num_handle_entries_reserved) {
        return NULL; /* slot index outside the reserved range */
    }
    struct handle_entry *candidate = &((struct handle_entry *)arena_base(G.handle_entries_arena))[handle.idx];
    /* Generation must match, otherwise the slot was recycled. */
    return (candidate->gen == handle.gen) ? candidate : NULL;
}
/* Resolves `handle` to its payload pointer while `lock` is held.
 * Returns NULL for a null handle (gen == 0) or a stale/invalid handle. */
INTERNAL void *handle_get_data_locked(struct gp_handle handle, enum handle_kind kind, struct sys_lock *lock)
{
    sys_assert_locked_e_or_s(lock, G.handle_entries_mutex);
    void *data = NULL;
    if (handle.gen) {
        struct handle_entry *entry = handle_get_entry(handle, lock);
        /* BUG FIX: handle_get_entry returns NULL for stale handles (generation
         * mismatch after release) and out-of-range indices; the old code
         * dereferenced `entry` unconditionally, a NULL dereference. */
        if (entry) {
            data = entry->data;
            /* Handle should match expected kind */
            (UNUSED)kind;
            ASSERT(entry->kind == kind);
        }
    }
    return data;
}
/* Convenience wrapper: resolves `handle` under G.handle_entries_mutex.
 * Returns NULL immediately for a null handle (gen == 0). */
INTERNAL void *handle_get_data(struct gp_handle handle, enum handle_kind kind)
{
    if (!handle.gen) {
        return NULL; /* null handle, nothing to look up */
    }
    struct sys_lock lock = sys_mutex_lock_e(G.handle_entries_mutex);
    void *result = handle_get_data_locked(handle, kind, &lock);
    sys_mutex_unlock(&lock);
    return result;
}
/* TODO: The GPU api should ensure that resources freed by the caller will not cause issues on the GPU (via fencing),
 * however the caller is responsible for managing resource lifetimes on the CPU side (e.g. using sprites w/ sprite scopes
 * to ensure freed textures aren't being used in pending command lists. */
void gp_release(struct gp_handle handle)
{
    enum handle_kind kind = 0;
    void *data = NULL;
    /* Release handle entry */
    struct sys_lock lock = sys_mutex_lock_e(G.handle_entries_mutex);
    {
        struct handle_entry *entry = handle_get_entry(handle, &lock);
        /* BUG FIX: the old code bumped entry->gen and pushed `entry` onto the
         * free list even when handle_get_entry returned NULL (stale or invalid
         * handle) — a NULL dereference. Only recycle a live entry; releasing a
         * stale handle is now a safe no-op. */
        if (entry) {
            kind = entry->kind;
            data = entry->data;
            ++entry->gen; /* invalidate any outstanding copies of this handle */
            entry->next_free = G.first_free_handle_entry;
            G.first_free_handle_entry = entry;
        }
    }
    sys_mutex_unlock(&lock);
    /* Release data */
    if (data) {
        switch (kind) {
        default: break;
        case DX12_HANDLE_KIND_RESOURCE:
        {
            /* GPU-side release is fenced so in-flight command lists finish first. */
            fenced_release(data, FENCED_RELEASE_KIND_RESOURCE);
        } break;
        }
    }
}
/* ========================== *
* Dx12 device initialization
* ========================== */
@ -1543,7 +1411,7 @@ struct flow {
struct material_instance_desc {
struct xform xf;
struct sprite_tag sprite;
struct gp_handle texture;
struct dx12_resource *texture;
struct clip_rect clip;
u32 tint;
f32 emittance;
@ -1617,7 +1485,7 @@ i32 gp_push_cmd(struct gp_flow *gp_flow, struct gp_cmd_desc *cmd_desc)
struct material_instance_desc *instance_desc = arena_push(flow->material_instance_descs_arena, struct material_instance_desc);
instance_desc->xf = cmd_desc->material.xf;
instance_desc->sprite = cmd_desc->material.sprite;
instance_desc->texture = cmd_desc->material.texture;
instance_desc->texture = (struct dx12_resource *)cmd_desc->material.texture;
instance_desc->clip = cmd_desc->material.clip;
instance_desc->tint = cmd_desc->material.tint;
instance_desc->emittance = cmd_desc->material.emittance;
@ -1810,6 +1678,12 @@ INTERNAL enum D3D12_RESOURCE_STATES dx12_resource_barrier(ID3D12GraphicsCommandL
return old_state;
}
/* Releases a GPU resource. The release is fenced internally so the GPU can
 * finish any in-flight work referencing the resource before it is destroyed. */
void gp_resource_release(struct gp_resource *resource)
{
    fenced_release((struct dx12_resource *)resource, FENCED_RELEASE_KIND_RESOURCE);
}
/* ========================== *
* Command queue
* ========================== */
@ -2064,7 +1938,7 @@ INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struc
/* Remove from submitted list */
old_heap = cdh->heap;
old_cpu_handle = cdh->cpu_handle;
old_gpu_handle = cdh->gp_handle;
old_gpu_handle = cdh->gpu_handle;
struct command_descriptor_heap *prev = cdh->prev_submitted;
struct command_descriptor_heap *next = cdh->next_submitted;
if (prev) {
@ -2088,7 +1962,7 @@ INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struc
if (old_heap) {
cdh->heap = old_heap;
cdh->cpu_handle = old_cpu_handle;
cdh->gp_handle = old_gpu_handle;
cdh->gpu_handle = old_gpu_handle;
} else {
D3D12_DESCRIPTOR_HEAP_DESC desc = ZI;
desc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV;
@ -2099,7 +1973,7 @@ INTERNAL struct command_descriptor_heap *command_list_push_descriptor_heap(struc
sys_panic(LIT("Failed to create GPU descriptor heap"));
}
ID3D12DescriptorHeap_GetCPUDescriptorHandleForHeapStart(cdh->heap, &cdh->cpu_handle);
ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(cdh->heap, &cdh->gp_handle);
ID3D12DescriptorHeap_GetGPUDescriptorHandleForHeapStart(cdh->heap, &cdh->gpu_handle);
}
/* Copy CPU heap */
@ -2312,7 +2186,7 @@ INTERNAL D3D12_INDEX_BUFFER_VIEW ibv_from_command_buffer(struct command_buffer *
* Texture
* ========================== */
struct gp_handle gp_texture_alloc(enum gp_texture_format format, u32 flags, struct v2i32 size, void *initial_data)
struct gp_resource *gp_texture_alloc(enum gp_texture_format format, u32 flags, struct v2i32 size, void *initial_data)
{
__prof;
struct dxgi_format_info { DXGI_FORMAT format; u32 size; };
@ -2465,17 +2339,13 @@ struct gp_handle gp_texture_alloc(enum gp_texture_format format, u32 flags, stru
}
}
return handle_alloc(DX12_HANDLE_KIND_RESOURCE, r);
return (struct gp_resource *)r;
}
struct v2i32 gp_texture_get_size(struct gp_handle resource)
struct v2i32 gp_texture_get_size(struct gp_resource *resource)
{
struct v2i32 res = ZI;
struct dx12_resource *dx12_resource = handle_get_data(resource, DX12_HANDLE_KIND_RESOURCE);
if (dx12_resource) {
res = dx12_resource->texture_size;
}
return res;
struct dx12_resource *r = (struct dx12_resource *)resource;
return r->texture_size;
}
/* ========================== *
@ -2495,6 +2365,7 @@ void gp_dispatch(struct gp_dispatch_params params)
__prof;
struct arena_temp scratch = scratch_begin_no_conflict();
struct flow *flow = (struct flow *)params.flow;
struct dx12_resource *target = (struct dx12_resource *)params.draw_target;
struct sprite_scope *sprite_scope = sprite_scope_begin();
struct pipeline_scope *pipeline_scope = pipeline_scope_begin();
@ -2504,7 +2375,6 @@ void gp_dispatch(struct gp_dispatch_params params)
struct command_list *cl = command_list_open(cq->cl_pool);
{
__profscope_dx12(cl->cq->prof, cl->cl, Dispatch, RGB32_F(0.5, 0.2, 0.2));
struct dx12_resource *target = handle_get_data(params.draw_target, DX12_HANDLE_KIND_RESOURCE);
struct mat4x4 vp_matrix = calculate_vp(params.draw_target_view, params.draw_target_viewport.width, params.draw_target_viewport.height);
/* Upload dummy vert & index buffer */
@ -2523,19 +2393,15 @@ void gp_dispatch(struct gp_dispatch_params params)
/* Process material instances */
{
__profscope(Process material instances);
struct sys_lock handles_lock = sys_mutex_lock_s(G.handle_entries_mutex);
for (u32 i = 0; i < flow->num_material_instance_descs; ++i) {
struct material_instance_desc *desc = &((struct material_instance_desc *)arena_base(flow->material_instance_descs_arena))[i];
struct sh_material_instance *instance = &material_instances[i];
i32 texture_id = -1;
{
struct dx12_resource *texture = NULL;
if (desc->texture.gen != 0) {
texture = handle_get_data_locked(desc->texture, DX12_HANDLE_KIND_RESOURCE, &handles_lock);
if (desc->texture != 0) {
texture_id = desc->texture->srv_descriptor->index;
} else if (desc->sprite.hash != 0) {
struct sprite_texture *st = sprite_texture_from_tag_async(sprite_scope, desc->sprite);
texture = handle_get_data_locked(st->texture, DX12_HANDLE_KIND_RESOURCE, &handles_lock);
}
struct dx12_resource *texture = (struct dx12_resource *)st->gp_texture;
if (texture) {
texture_id = texture->srv_descriptor->index;
}
@ -2548,7 +2414,6 @@ void gp_dispatch(struct gp_dispatch_params params)
instance->tint_srgb = sh_uint_from_u32(desc->tint);
instance->emittance = sh_float_from_f32(desc->emittance);
}
sys_mutex_unlock(&handles_lock);
}
/* Process grids */
@ -2611,7 +2476,7 @@ void gp_dispatch(struct gp_dispatch_params params)
/* Set descriptor heap */
ID3D12DescriptorHeap *heaps[] = { descriptor_heap->heap };
ID3D12GraphicsCommandList_SetDescriptorHeaps(cl->cl, ARRAY_COUNT(heaps), heaps);
ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 3, descriptor_heap->gp_handle);
ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 3, descriptor_heap->gpu_handle);
/* Setup Rasterizer State */
D3D12_VIEWPORT viewport = viewport_from_rect(params.draw_target_viewport);
@ -2684,7 +2549,6 @@ struct gp_memory_info gp_query_memory_info(void)
IDXGIAdapter3 *dxgiAdapter3 = NULL;
if (SUCCEEDED(hr)) {
hr = IDXGIAdapter_QueryInterface(G.adapter, &IID_IDXGIAdapter3, (void **)&dxgiAdapter3);
ASSERT(SUCCEEDED(hr));
}
if (SUCCEEDED(hr)) {
struct DXGI_QUERY_VIDEO_MEMORY_INFO info = ZI;
@ -2875,7 +2739,7 @@ INTERNAL void present_blit(struct swapchain_buffer *dst, struct dx12_resource *s
/* Set descriptor heap */
ID3D12DescriptorHeap *heaps[] = { descriptor_heap->heap };
ID3D12GraphicsCommandList_SetDescriptorHeaps(cl->cl, ARRAY_COUNT(heaps), heaps);
ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 1, descriptor_heap->gp_handle);
ID3D12GraphicsCommandList_SetGraphicsRootDescriptorTable(cl->cl, 1, descriptor_heap->gpu_handle);
/* Setup Rasterizer State */
ID3D12GraphicsCommandList_RSSetViewports(cl->cl, 1, &viewport);
@ -2909,14 +2773,14 @@ INTERNAL void present_blit(struct swapchain_buffer *dst, struct dx12_resource *s
pipeline_scope_end(pipeline_scope);
}
void gp_present(struct sys_window *window, struct v2i32 backresolution, struct gp_handle texture, struct xform texture_xf, i32 vsync)
void gp_present(struct sys_window *window, struct v2i32 backresolution, struct gp_resource *texture, struct xform texture_xf, i32 vsync)
{
__prof;
//sys_sleep(0.1);
struct swapchain *swapchain = &G.swapchain;
struct swapchain_buffer *swapchain_buffer = update_swapchain(swapchain, window, backresolution);
struct dx12_resource *texture_resource = handle_get_data(texture, DX12_HANDLE_KIND_RESOURCE);
struct dx12_resource *texture_resource = (struct dx12_resource *)texture;
/* Blit */
present_blit(swapchain_buffer, texture_resource, texture_xf);

View File

@ -227,7 +227,7 @@ struct sprite_startup_receipt sprite_startup(struct gp_startup_receipt *gp_sr,
{
struct arena_temp scratch = scratch_begin_no_conflict();
struct image_rgba purple_black_image = generate_purple_black_image(scratch.arena, 64, 64);
G.nil_texture->texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2I32(purple_black_image.width, purple_black_image.height), purple_black_image.pixels);
G.nil_texture->gp_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, 0, V2I32(purple_black_image.width, purple_black_image.height), purple_black_image.pixels);
scratch_end(scratch);
}
@ -379,7 +379,7 @@ INTERNAL void cache_entry_load_texture(struct cache_ref ref, struct sprite_tag t
e->texture = arena_push(e->arena, struct sprite_texture);
e->texture->width = decoded.image.width;
e->texture->height = decoded.image.height;
e->texture->texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB, 0, V2I32(decoded.image.width, decoded.image.height), decoded.image.pixels);
e->texture->gp_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM_SRGB, 0, V2I32(decoded.image.width, decoded.image.height), decoded.image.pixels);
e->texture->valid = true;
e->texture->loaded = true;
/* TODO: Query gpu for more accurate texture size in VRAM */
@ -1355,7 +1355,7 @@ INTERNAL SYS_THREAD_ENTRY_POINT_FUNC_DEF(sprite_evictor_thread_entry_point, arg)
for (struct evict_node *en = first_evicted; en; en = en->next_evicted) {
struct cache_entry *n = en->cache_entry;
if (n->kind == CACHE_ENTRY_KIND_TEXTURE && n->texture->valid) {
gp_release(n->texture->texture);
gp_resource_release(n->texture->gp_texture);
}
arena_release(n->arena);
}

View File

@ -48,7 +48,7 @@ void sprite_scope_end(struct sprite_scope *scope);
struct sprite_texture {
b32 loaded;
b32 valid;
struct gp_handle texture;
struct gp_resource *gp_texture;
u32 width;
u32 height;
};

View File

@ -70,9 +70,8 @@ GLOBAL struct {
struct second_stat net_bytes_read;
struct second_stat net_bytes_sent;
/* Gpu handles */
struct gp_handle user_texture;
/* Gpu resources */
struct gp_resource *user_texture;
struct gp_flow *world_gp_flow;
struct gp_flow *ui_gp_flow;
@ -2060,9 +2059,9 @@ INTERNAL void user_update(void)
struct v2i32 backbuffer_resolution = v2_round_to_int(G.screen_size);
/* Allocate user texture */
if (!G.user_texture.v || !v2i32_eq(gp_texture_get_size(G.user_texture), user_resolution)) {
if (G.user_texture.v) {
gp_release(G.user_texture);
if (!G.user_texture || !v2i32_eq(gp_texture_get_size(G.user_texture), user_resolution)) {
if (G.user_texture) {
gp_resource_release(G.user_texture);
}
G.user_texture = gp_texture_alloc(GP_TEXTURE_FORMAT_R8G8B8A8_UNORM, GP_TEXTURE_FLAG_TARGETABLE, user_resolution, NULL);
}