author     Omniscient <omniscient.oce@gmail.com>  2024-05-20 10:43:32 +1000
committer  Omniscient <omniscient.oce@gmail.com>  2024-05-20 10:43:32 +1000
commit     af65a3d6c3b5ed750af5bf08e04a4c7e4d0da239 (patch)
tree       6d0ada237b9350ce6ed16beccba2ac5a8895a20f /src
parent     142d34d679e29d434fdd5ff369ed88240bbe8083 (diff)
cleanup ral.h
Diffstat (limited to 'src')
-rw-r--r--  src/renderer/backends/backend_vulkan.c  136
-rw-r--r--  src/renderer/ral.c                        33
-rw-r--r--  src/renderer/ral.h                        96
3 files changed, 127 insertions(+), 138 deletions(-)
diff --git a/src/renderer/backends/backend_vulkan.c b/src/renderer/backends/backend_vulkan.c
index fc6d961..ef266e0 100644
--- a/src/renderer/backends/backend_vulkan.c
+++ b/src/renderer/backends/backend_vulkan.c
@@ -92,7 +92,8 @@ VkShaderModule create_shader_module(str8 spirv);
/** @brief Helper function for creating array of all extensions we want */
cstr_darray* get_all_extensions();
-VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat format ,VkImageUsageFlags usage);
+VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat format,
+ VkImageUsageFlags usage);
void vulkan_transition_image_layout(gpu_texture* texture, VkFormat format, VkImageLayout old_layout,
VkImageLayout new_layout);
@@ -580,7 +581,8 @@ gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc descrip
case SHADER_BINDING_BUFFER:
case SHADER_BINDING_BYTES:
desc_set_bindings[binding_j].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
- desc_set_bindings[binding_j].stageFlags = VK_SHADER_STAGE_VERTEX_BIT; // FIXME: dont hardcode
+ desc_set_bindings[binding_j].stageFlags =
+ VK_SHADER_STAGE_VERTEX_BIT; // FIXME: dont hardcode
u64 buffer_size = sdl.bindings[binding_j].data.bytes.size;
VkDeviceSize uniform_buf_size = buffer_size;
@@ -616,7 +618,8 @@ gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc descrip
break;
case SHADER_BINDING_TEXTURE:
desc_set_bindings[binding_j].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
- desc_set_bindings[binding_j].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; // FIXME: dont hardcode
+ desc_set_bindings[binding_j].stageFlags =
+ VK_SHADER_STAGE_FRAGMENT_BIT; // FIXME: dont hardcode
desc_set_bindings[binding_j].pImmutableSamplers = NULL;
break;
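Both FIXMEs above note that the descriptor stage flags are hardcoded per binding type. One conservative interim fix, sketched here purely as an assumption and not as the project's planned approach, is to make each binding visible to both stages until the binding description carries its own visibility:

    // Hypothetical sketch: widen visibility instead of guessing a single stage.
    // A later change could derive this from a per-binding stage/visibility field.
    desc_set_bindings[binding_j].stageFlags =
        VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;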
@@ -748,14 +751,17 @@ gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {
color_attachment_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
// Depth attachment
- u32x2 ext = { .x = context.swapchain_support.capabilities.currentExtent.width, .y = context.swapchain_support.capabilities.currentExtent.height};
- texture_desc depth_desc = { .extents = ext, .format = CEL_TEXTURE_FORMAT_DEPTH_DEFAULT, .tex_type = CEL_TEXTURE_TYPE_2D};
+ u32x2 ext = { .x = context.swapchain_support.capabilities.currentExtent.width,
+ .y = context.swapchain_support.capabilities.currentExtent.height };
+ texture_desc depth_desc = { .extents = ext,
+ .format = CEL_TEXTURE_FORMAT_DEPTH_DEFAULT,
+ .tex_type = CEL_TEXTURE_TYPE_2D };
texture_handle depth_texture_handle = gpu_texture_create(depth_desc, true, NULL);
gpu_texture* depth = TEXTURE_GET(depth_texture_handle);
VkAttachmentDescription depth_attachment;
- depth_attachment.format = // TODO: context->device.depth_format;
- depth_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+ depth_attachment.format = // TODO: context->device.depth_format;
+ depth_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
depth_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
depth_attachment.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
depth_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
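The reformatting above preserves an existing quirk: the // TODO: comment swallows the rest of that source line, so the statement actually parses as depth_attachment.format = depth_attachment.samples = VK_SAMPLE_COUNT_1_BIT;. A minimal sketch of the resolved assignment, assuming VK_FORMAT_D32_SFLOAT remains the default depth format (as gpu_texture_create below already uses for depth textures) until a queried device format is available:

    // Assumption: D32_SFLOAT as the depth format until context.device exposes one.
    depth_attachment.format = VK_FORMAT_D32_SFLOAT;
    depth_attachment.samples = VK_SAMPLE_COUNT_1_BIT;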
@@ -808,15 +814,6 @@ gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {
return renderpass;
}
-void encode_set_pipeline(gpu_cmd_encoder* encoder, gpu_pipeline* pipeline) {
- // VK_PIPELINE_BIND_POINT_GRAPHICS, &shader->pipeline);
- // if (kind == PIPELINE_GRAPHICS) {
- // // ...
- // } else {
- // // ...
- // }
-}
-
gpu_cmd_encoder gpu_cmd_encoder_create() {
// gpu_cmd_encoder* encoder = malloc(sizeof(gpu_cmd_encoder)); // TODO: fix leaking mem
gpu_cmd_encoder encoder = { 0 };
@@ -834,7 +831,7 @@ gpu_cmd_encoder gpu_cmd_encoder_create() {
// Uniforms pool
pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
pool_sizes[0].descriptorCount = MAX_FRAMES_IN_FLIGHT * MAX_DESCRIPTOR_SETS;
- // Samplers pool
+ // Samplers pool
pool_sizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
pool_sizes[1].descriptorCount = MAX_FRAMES_IN_FLIGHT * MAX_DESCRIPTOR_SETS;
@@ -955,7 +952,7 @@ void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* d
write_sets[i].dstBinding = i;
write_sets[i].dstArrayElement = 0;
write_sets[i].pImageInfo = image_info;
- }else {
+ } else {
WARN("Unknown binding");
}
}
@@ -1497,7 +1494,8 @@ void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst) {
vulkan_command_buffer_finish_oneshot(temp_cmd_buffer);
}
-VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat format ,VkImageUsageFlags usage) {
+VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat format,
+ VkImageUsageFlags usage) {
VkImage image;
VkImageCreateInfo image_create_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
@@ -1510,7 +1508,7 @@ VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat f
image_create_info.format = format;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- image_create_info.usage = usage; // VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ image_create_info.usage = usage; // VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
@@ -1523,10 +1521,11 @@ VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat f
texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {
VkDeviceSize image_size = desc.extents.x * desc.extents.y * 4;
// FIXME: handle this properly
- VkFormat format = desc.format == CEL_TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM ? VK_FORMAT_R8G8B8A8_SRGB :
- VK_FORMAT_D32_SFLOAT;
+ VkFormat format = desc.format == CEL_TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM ? VK_FORMAT_R8G8B8A8_SRGB
+ : VK_FORMAT_D32_SFLOAT;
- VkImage image; //vulkan_image_create(desc.extents, VK_IMAGE_TYPE_2D, format, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
+ VkImage image; // vulkan_image_create(desc.extents, VK_IMAGE_TYPE_2D, format,
+ // VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
VkDeviceMemory image_memory;
VkImageCreateInfo image_create_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
@@ -1575,10 +1574,13 @@ texture_handle gpu_texture_create(texture_desc desc, bool create_view, const voi
buffer_handle staging =
gpu_buffer_create(image_size, CEL_BUFFER_DEFAULT, CEL_BUFFER_FLAG_CPU, NULL);
// Copy data into it
- vulkan_transition_image_layout(texture, format, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
- buffer_upload_bytes(staging, (bytebuffer){ .buf = (u8*)data, .size = image_size }, 0, image_size);
+ vulkan_transition_image_layout(texture, format, VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ buffer_upload_bytes(staging, (bytebuffer){ .buf = (u8*)data, .size = image_size }, 0,
+ image_size);
copy_buffer_to_image_oneshot(staging, handle);
- vulkan_transition_image_layout(texture, format, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+ vulkan_transition_image_layout(texture, format, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
gpu_buffer_destroy(staging);
}
@@ -1589,7 +1591,8 @@ texture_handle gpu_texture_create(texture_desc desc, bool create_view, const voi
view_create_info.image = image;
view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
view_create_info.format = format;
- view_create_info.subresourceRange.aspectMask = format == VK_FORMAT_D32_SFLOAT ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
+ view_create_info.subresourceRange.aspectMask =
+ format == VK_FORMAT_D32_SFLOAT ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
view_create_info.subresourceRange.baseMipLevel = 0;
view_create_info.subresourceRange.levelCount = 1;
@@ -1601,29 +1604,29 @@ texture_handle gpu_texture_create(texture_desc desc, bool create_view, const voi
}
// Sampler
- VkSamplerCreateInfo sampler_info = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
- sampler_info.magFilter = VK_FILTER_LINEAR;
- sampler_info.minFilter = VK_FILTER_LINEAR;
- sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
- sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
- sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
- sampler_info.anisotropyEnable = VK_TRUE;
- sampler_info.maxAnisotropy = 16;
- sampler_info.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
- sampler_info.unnormalizedCoordinates = VK_FALSE;
- sampler_info.compareEnable = VK_FALSE;
- sampler_info.compareOp = VK_COMPARE_OP_ALWAYS;
- sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
- sampler_info.mipLodBias = 0.0;
- sampler_info.minLod = 0.0;
- sampler_info.maxLod = 0.0;
-
- VkResult res = vkCreateSampler(context.device->logical_device, &sampler_info, context.allocator,
- &texture->sampler);
- if (res != VK_SUCCESS) {
- ERROR("Error creating texture sampler for image %s", texture->debug_label);
- return;
- }
+ VkSamplerCreateInfo sampler_info = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
+ sampler_info.magFilter = VK_FILTER_LINEAR;
+ sampler_info.minFilter = VK_FILTER_LINEAR;
+ sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.anisotropyEnable = VK_TRUE;
+ sampler_info.maxAnisotropy = 16;
+ sampler_info.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
+ sampler_info.unnormalizedCoordinates = VK_FALSE;
+ sampler_info.compareEnable = VK_FALSE;
+ sampler_info.compareOp = VK_COMPARE_OP_ALWAYS;
+ sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ sampler_info.mipLodBias = 0.0;
+ sampler_info.minLod = 0.0;
+ sampler_info.maxLod = 0.0;
+
+ VkResult res = vkCreateSampler(context.device->logical_device, &sampler_info, context.allocator,
+ &texture->sampler);
+ if (res != VK_SUCCESS) {
+ ERROR("Error creating texture sampler for image %s", texture->debug_label);
+ return;
+ }
return handle;
}
@@ -1673,39 +1676,6 @@ void vulkan_transition_image_layout(gpu_texture* texture, VkFormat format, VkIma
vulkan_command_buffer_finish_oneshot(temp_cmd_buffer);
}
-size_t vertex_attrib_size(vertex_attrib_type attr) {
- switch (attr) {
- case ATTR_F32:
- case ATTR_U32:
- case ATTR_I32:
- return 4;
- case ATTR_F32x2:
- case ATTR_U32x2:
- case ATTR_I32x2:
- return 8;
- case ATTR_F32x3:
- case ATTR_U32x3:
- case ATTR_I32x3:
- return 12;
- case ATTR_F32x4:
- case ATTR_U32x4:
- case ATTR_I32x4:
- return 16;
- break;
- }
-}
-
-void vertex_desc_add(vertex_description* builder, const char* name, vertex_attrib_type type) {
- u32 i = builder->attributes_count;
-
- size_t size = vertex_attrib_size(type);
- builder->attributes[i] = type;
- builder->stride += size;
- builder->attr_names[i] = name;
-
- builder->attributes_count++;
-}
-
/* TYPED_POOL(gpu_buffer, buffer); */
/* TYPED_POOL(gpu_texture, texture); */
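With this change gpu_texture_create uploads pixel data through a staging buffer and creates a sampler in the same call. A hedged usage sketch follows; the field values mirror the initializers visible above, while width, height, and pixels are hypothetical caller-side variables:

    // Hypothetical 2D RGBA texture upload; pixels is assumed to point at
    // width * height * 4 bytes of CPU-side image data.
    texture_desc desc = { .extents = (u32x2){ .x = width, .y = height },
                          .format = CEL_TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM,
                          .tex_type = CEL_TEXTURE_TYPE_2D };
    texture_handle tex = gpu_texture_create(desc, true, pixels);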
diff --git a/src/renderer/ral.c b/src/renderer/ral.c
index 304017d..2ae7c23 100644
--- a/src/renderer/ral.c
+++ b/src/renderer/ral.c
@@ -1 +1,34 @@
#include "ral.h"
+
+size_t vertex_attrib_size(vertex_attrib_type attr) {
+ switch (attr) {
+ case ATTR_F32:
+ case ATTR_U32:
+ case ATTR_I32:
+ return 4;
+ case ATTR_F32x2:
+ case ATTR_U32x2:
+ case ATTR_I32x2:
+ return 8;
+ case ATTR_F32x3:
+ case ATTR_U32x3:
+ case ATTR_I32x3:
+ return 12;
+ case ATTR_F32x4:
+ case ATTR_U32x4:
+ case ATTR_I32x4:
+ return 16;
+ break;
+ }
+}
+
+void vertex_desc_add(vertex_description* builder, const char* name, vertex_attrib_type type) {
+ u32 i = builder->attributes_count;
+
+ size_t size = vertex_attrib_size(type);
+ builder->attributes[i] = type;
+ builder->stride += size;
+ builder->attr_names[i] = name;
+
+ builder->attributes_count++;
+}
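These two helpers are backend-agnostic, which is presumably why they now live beside the RAL declarations instead of in backend_vulkan.c. A usage sketch, with only the vertex_desc_add signature taken from the header and the zero-initialized builder assumed to be a valid empty vertex_description:

    // Hypothetical interleaved position/normal/uv layout.
    vertex_description builder = { 0 };
    vertex_desc_add(&builder, "position", ATTR_F32x3);
    vertex_desc_add(&builder, "normal", ATTR_F32x3);
    vertex_desc_add(&builder, "uv", ATTR_F32x2);
    // builder.stride is now 12 + 12 + 8 = 32 bytes.

Note that the moved switch in vertex_attrib_size has no default branch, so a compiler that cannot prove it exhaustive may warn about control reaching the end of a non-void function.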
diff --git a/src/renderer/ral.h b/src/renderer/ral.h
index 3697ea5..4f23ad0 100644
--- a/src/renderer/ral.h
+++ b/src/renderer/ral.h
@@ -18,10 +18,9 @@
#include "str.h"
// Unrelated forward declares
-typedef struct arena arena;
struct GLFWwindow;
-// Forward declare structs
+// Forward declare structs - these must be defined in the backend implementation
typedef struct gpu_swapchain gpu_swapchain;
typedef struct gpu_device gpu_device;
typedef struct gpu_pipeline_layout gpu_pipeline_layout;
@@ -36,15 +35,17 @@ typedef struct gpu_texture gpu_texture;
#define MAX_BUFFERS 256
#define MAX_TEXTURES 256
-/** @brief A*/
-// typedef struct gpu_bind_group
+TYPED_POOL(gpu_buffer, buffer);
+TYPED_POOL(gpu_texture, texture);
-// Pools
-typedef struct gpu_backend_pools {
- // pools for each gpu structure
+// --- Pools
+typedef struct gpu_backend_pools { /* TODO: pools for each gpu structure */
} gpu_backend_pools;
-/* typedef struct resource_pools resource_pools; */
+struct resource_pools {
+ buffer_pool buffers;
+ texture_pool textures;
+};
typedef enum pipeline_kind {
PIPELINE_GRAPHICS,
@@ -64,13 +65,10 @@ struct graphics_pipeline_desc {
shader_desc vs; /** @brief Vertex shader stage */
shader_desc fs; /** @brief Fragment shader stage */
- /* shader_data_layout data_layouts[MAX_SHADER_DATA_LAYOUTS]; */
- /* u32 data_layouts_count; */
-
// Roughly equivalent to a descriptor set layout each. each layout can have multiple bindings
// examples:
// - uniform buffer reprensenting view projection matrix
- // - texture for shadow map ?
+ // - texture for shadow map
shader_data data_layouts[MAX_SHADER_DATA_LAYOUTS];
u32 data_layouts_count;
@@ -81,30 +79,30 @@ struct graphics_pipeline_desc {
bool depth_test;
};
-typedef struct gpu_renderpass_desc {
+typedef struct gpu_renderpass_desc { /* TODO */
} gpu_renderpass_desc;
// --- Lifecycle functions
-
bool gpu_backend_init(const char* window_name, struct GLFWwindow* window);
void gpu_backend_shutdown();
-
-// TEMP
-bool gpu_backend_begin_frame();
-void gpu_backend_end_frame();
+void resource_pools_init(arena* a, struct resource_pools* res_pools);
bool gpu_device_create(gpu_device* out_device);
void gpu_device_destroy();
-gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description);
-void gpu_renderpass_destroy(gpu_renderpass* pass);
-
+// --- Render Pipeline
gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description);
void gpu_pipeline_destroy(gpu_pipeline* pipeline);
+// --- Renderpass
+gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description);
+void gpu_renderpass_destroy(gpu_renderpass* pass);
+
+// --- Swapchain
bool gpu_swapchain_create(gpu_swapchain* out_swapchain);
void gpu_swapchain_destroy(gpu_swapchain* swapchain);
+// --- Command buffer
gpu_cmd_encoder gpu_cmd_encoder_create();
void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder);
void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder);
@@ -113,16 +111,20 @@ void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder);
void gpu_cmd_encoder_begin_compute();
gpu_cmd_encoder* gpu_get_default_cmd_encoder();
-/* Actual commands that we can encode */
+// --- Data copy commands
+/** @brief Copy data from one buffer to another */
void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
buffer_handle dst, u64 dst_offset, u64 copy_size);
-void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
-void encode_set_pipeline(gpu_cmd_encoder* encoder, gpu_pipeline* pipeline);
-
/** @brief Upload CPU-side data as array of bytes to a GPU buffer */
void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size);
-// render pass
+/** @brief Copy data from buffer to buffer using a one time submit command buffer and a wait */
+void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
+ u64 dst_offset, u64 copy_size);
+/** @brief Copy data from buffer to an image using a one time submit command buffer */
+void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst);
+
+// --- Render commands
void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline);
void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data);
void encode_set_default_settings(gpu_cmd_encoder* encoder);
@@ -131,53 +133,37 @@ void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
void encode_set_bind_group(); // TODO
void encode_draw(gpu_cmd_encoder* encoder);
void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count);
+void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
-// FUTURE: compute passes
/** @brief Finish recording and return a command buffer that can be submitted to a queue */
gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder);
void gpu_queue_submit(gpu_cmd_buffer* buffer);
-// Buffers
+// --- Buffers
buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
const void* data);
void gpu_buffer_destroy(buffer_handle buffer);
-void gpu_buffer_upload();
-void gpu_buffer_bind(buffer_handle buffer);
+void gpu_buffer_upload(const void* data);
// Textures
+/** @brief Create a new GPU texture resource.
+ * @param create_view creates a texture view (with same dimensions) at the same time
+ * @param data if not NULL then the data stored at the pointer will be uploaded to the GPU texture
+ * @note automatically creates a sampler for you */
texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data);
-void gpu_texture_destroy();
-void gpu_texture_upload();
-
-// Samplers
-void gpu_sampler_create();
+void gpu_texture_destroy(texture_handle);
+void gpu_texture_upload(texture_handle texture, const void* data);
// --- Vertex formats
bytebuffer vertices_as_bytebuffer(arena* a, vertex_format format, vertex_darray* vertices);
void vertex_desc_add(vertex_description* builder, const char* name, vertex_attrib_type type);
-// TODO: Bindgroup texture samplers / shader resources
-
-// TEMP
-
+// --- TEMP
+bool gpu_backend_begin_frame();
+void gpu_backend_end_frame();
void gpu_temp_draw(size_t n_verts);
-TYPED_POOL(gpu_buffer, buffer);
-TYPED_POOL(gpu_texture, texture);
-
-struct resource_pools {
- buffer_pool buffers;
- texture_pool textures;
-};
-
-// Must be implemented by backends
-void resource_pools_init(arena* a, struct resource_pools* res_pools);
-
-void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
- u64 dst_offset, u64 copy_size);
-void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst);
-
-// --- Helpers
+// TODO: --- Compute
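Read top to bottom, the regrouped header sketches a frame lifecycle. The following is purely illustrative, built only from the signatures declared above; call order, render-pass handling, and the running / window / pipeline / per_frame_data / index_buffer / index_count variables are all assumptions:

    // Illustrative frame loop; render pass begin/end is elided because its
    // signature is not part of this diff.
    gpu_backend_init("demo", window);

    while (running) {
      if (!gpu_backend_begin_frame()) continue;

      gpu_cmd_encoder* enc = gpu_get_default_cmd_encoder();
      encode_bind_pipeline(enc, PIPELINE_GRAPHICS, pipeline);
      encode_set_default_settings(enc);
      encode_bind_shader_data(enc, 0, &per_frame_data);
      encode_set_index_buffer(enc, index_buffer);
      encode_draw_indexed(enc, index_count);

      gpu_cmd_buffer cmd = gpu_cmd_encoder_finish(enc);
      gpu_queue_submit(&cmd);
      gpu_backend_end_frame();
    }

    gpu_backend_shutdown();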