author    omniscient <17525998+omnisci3nce@users.noreply.github.com>  2024-07-09 23:32:33 +1000
committer omniscient <17525998+omnisci3nce@users.noreply.github.com>  2024-07-09 23:32:33 +1000
commit    3103f383751a12f8a0bdb22309704f1f826d204c (patch)
tree      7da8febddfcc40b15de5d7fc3c9a5215d88c5cab /src/render
parent    d5f22a65ab12b289d80b035e45e6f1e9460b82d1 (diff)
wip: some cleanup of ral
Diffstat (limited to 'src/render')
-rw-r--r--  src/render/backends/backend_test.c               1
-rw-r--r--  src/render/backends/metal/README.md              1
-rw-r--r--  src/render/backends/metal/backend_metal.h       74
-rw-r--r--  src/render/backends/metal/backend_metal.m      285
-rw-r--r--  src/render/backends/opengl/README.md             1
-rw-r--r--  src/render/backends/opengl/backend_opengl.c    537
-rw-r--r--  src/render/backends/opengl/backend_opengl.h     68
-rw-r--r--  src/render/backends/opengl/opengl_helpers.h     74
-rw-r--r--  src/render/backends/vulkan/README.md             1
-rw-r--r--  src/render/backends/vulkan/backend_vulkan.c   1705
-rw-r--r--  src/render/backends/vulkan/backend_vulkan.h    118
-rw-r--r--  src/render/backends/vulkan/vulkan_glossary.md   18
-rw-r--r--  src/render/backends/vulkan_helpers.h           199
-rw-r--r--  src/render/bind_group_layouts.h                 30
-rw-r--r--  src/render/builtin_materials.h                 154
-rw-r--r--  src/render/immediate.c                          46
-rw-r--r--  src/render/immediate.h                          19
-rw-r--r--  src/render/ral.c                                97
-rw-r--r--  src/render/ral.h                               198
-rw-r--r--  src/render/ral_types.h                         257
-rw-r--r--  src/render/render.c                            287
-rw-r--r--  src/render/render.h                             96
-rw-r--r--  src/render/render_types.h                      181
-rw-r--r--  src/render/renderpasses.c                      140
-rw-r--r--  src/render/renderpasses.h                       56
-rw-r--r--  src/render/static_pipeline.h                    30
26 files changed, 4673 insertions, 0 deletions
diff --git a/src/render/backends/backend_test.c b/src/render/backends/backend_test.c
new file mode 100644
index 0000000..6347e27
--- /dev/null
+++ b/src/render/backends/backend_test.c
@@ -0,0 +1 @@
+// #FUTURE
\ No newline at end of file
diff --git a/src/render/backends/metal/README.md b/src/render/backends/metal/README.md
new file mode 100644
index 0000000..f87f5c1
--- /dev/null
+++ b/src/render/backends/metal/README.md
@@ -0,0 +1 @@
+# TODO
\ No newline at end of file
diff --git a/src/render/backends/metal/backend_metal.h b/src/render/backends/metal/backend_metal.h
new file mode 100644
index 0000000..9561bb6
--- /dev/null
+++ b/src/render/backends/metal/backend_metal.h
@@ -0,0 +1,74 @@
+#pragma once
+// #define CEL_REND_BACKEND_METAL
+#if defined(CEL_REND_BACKEND_METAL)
+
+#include "defines.h"
+#include "maths_types.h"
+#ifdef __OBJC__
+#import <Foundation/Foundation.h>
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+#import <QuartzCore/CAMetalLayer.h>
+#else
+typedef void* id;
+#endif
+
+typedef struct gpu_swapchain {
+ u32x2 dimensions;
+#ifdef __OBJC__
+ CAMetalLayer* swapchain;
+#else
+ void* swapchain;
+#endif
+} gpu_swapchain;
+typedef struct gpu_device {
+/** @brief `device` gives us access to our GPU */
+#ifdef __OBJC__
+ id<MTLDevice> id;
+#else
+ void* id;
+#endif
+} gpu_device;
+typedef struct gpu_pipeline_layout {
+ void* pad;
+} gpu_pipeline_layout;
+typedef struct gpu_pipeline {
+#ifdef __OBJC__
+ id<MTLRenderPipelineState> pipeline_state;
+#else
+ void* pipeline_state;
+#endif
+} gpu_pipeline;
+typedef struct gpu_renderpass {
+#ifdef __OBJC__
+ MTLRenderPassDescriptor* rpass_descriptor;
+#else
+ void* rpass_descriptor;
+#endif
+} gpu_renderpass;
+typedef struct gpu_cmd_encoder {
+#ifdef __OBJC__
+ id<MTLCommandBuffer> cmd_buffer;
+ id<MTLRenderCommandEncoder> render_encoder;
+#else
+ void* cmd_buffer;
+ void* render_encoder;
+#endif
+} gpu_cmd_encoder;
+typedef struct gpu_cmd_buffer {
+ void* pad;
+} gpu_cmd_buffer;
+
+typedef struct gpu_buffer {
+#ifdef __OBJC__
+ id<MTLBuffer> id;
+#else
+ void* id;
+#endif
+ u64 size;
+} gpu_buffer;
+typedef struct gpu_texture {
+ void* pad;
+} gpu_texture;
+
+#endif
\ No newline at end of file
diff --git a/src/render/backends/metal/backend_metal.m b/src/render/backends/metal/backend_metal.m
new file mode 100644
index 0000000..4787755
--- /dev/null
+++ b/src/render/backends/metal/backend_metal.m
@@ -0,0 +1,285 @@
+#include <assert.h>
+// #define CEL_REND_BACKEND_METAL
+#if defined(CEL_REND_BACKEND_METAL)
+#include <stddef.h>
+#include "ral_types.h"
+#include "colours.h"
+#include <stdlib.h>
+#include "camera.h"
+#include "defines.h"
+#include "file.h"
+#include "log.h"
+#include "maths_types.h"
+#include "ral.h"
+
+#define GLFW_INCLUDE_NONE
+#define GLFW_EXPOSE_NATIVE_COCOA
+
+#include <GLFW/glfw3.h>
+#include <GLFW/glfw3native.h>
+
+#import <Foundation/Foundation.h>
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+#import <QuartzCore/CAMetalLayer.h>
+#include "backend_metal.h"
+
+// --- Handy macros
+#define BUFFER_GET(h) (buffer_pool_get(&context.resource_pools->buffers, h))
+#define TEXTURE_GET(h) (texture_pool_get(&context.resource_pools->textures, h))
+
+typedef struct metal_context {
+ GLFWwindow* window;
+ NSWindow* metal_window;
+ arena pool_arena;
+
+ gpu_device* device;
+ gpu_swapchain* swapchain;
+ id<CAMetalDrawable> surface;
+
+ id<MTLCommandQueue> command_queue;
+ gpu_cmd_encoder main_command_buf;
+ gpu_backend_pools gpu_pools;
+ struct resource_pools* resource_pools;
+} metal_context;
+
+static metal_context context;
+
+struct GLFWwindow;
+
+bool gpu_backend_init(const char *window_name, struct GLFWwindow *window) {
+ INFO("loading Metal backend");
+
+ memset(&context, 0, sizeof(metal_context));
+ context.window = window;
+
+ size_t pool_buffer_size = 1024 * 1024;
+ context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size);
+
+ backend_pools_init(&context.pool_arena, &context.gpu_pools);
+ context.resource_pools = malloc(sizeof(struct resource_pools));
+ resource_pools_init(&context.pool_arena, context.resource_pools);
+
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+
+ glfwMakeContextCurrent(window);
+ // FIXME: glfwSetFramebufferSizeCallback(ren->window, framebuffer_size_callback);
+
+ // get a NSWindow pointer from GLFWwindow
+ NSWindow *nswindow = glfwGetCocoaWindow(window);
+ context.metal_window = nswindow;
+
+ // const id<MTLCommandQueue> queue = [gpu newCommandQueue];
+ // CAMetalLayer *swapchain = [CAMetalLayer layer];
+ // swapchain.device = gpu;
+ // swapchain.opaque = YES;
+
+ // // set swapchain for the window
+ // nswindow.contentView.layer = swapchain;
+ // nswindow.contentView.wantsLayer = YES;
+
+ // MTLClearColor color = MTLClearColorMake(0.7, 0.1, 0.2, 1.0);
+
+ // // set all our state properties
+ // state->device = gpu;
+ // state->cmd_queue = queue;
+ // state->swapchain = swapchain;
+ // state->clear_color = color;
+
+ // NSError *err = 0x0; // TEMPORARY
+
+ // WARN("About to try loading metallib");
+ // id<MTLLibrary> defaultLibrary = [state->device newLibraryWithFile: @"build/gfx.metallib" error:&err];
+ // CASSERT(defaultLibrary);
+ // state->default_lib = defaultLibrary;
+ // if (!state->default_lib) {
+ // NSLog(@"Failed to load library");
+ // exit(0);
+ // }
+
+ // create_render_pipeline(state);
+
+ return true;
+}
+
+void gpu_backend_shutdown() {}
+
+bool gpu_device_create(gpu_device* out_device) {
+ TRACE("GPU Device creation");
+ const id<MTLDevice> gpu = MTLCreateSystemDefaultDevice();
+ out_device->id = gpu;
+ context.device = out_device;
+
+ const id<MTLCommandQueue> queue = [gpu newCommandQueue];
+ context.command_queue = queue;
+
+ return true;
+}
+void gpu_device_destroy() {}
+
+// --- Render Pipeline
+gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) {
+ TRACE("GPU Graphics Pipeline creation");
+ // Allocate
+ // gpu_pipeline_layout* layout =
+ // pipeline_layout_pool_alloc(&context.gpu_pools.pipeline_layouts, NULL);
+ gpu_pipeline* pipeline = pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL);
+
+ WARN("About to try loading metallib");
+ assert(description.vs.is_combined_vert_frag);
+ // Ignore fragment shader data, as vert shader data contains both
+ NSError *err = 0x0; // TEMPORARY
+ NSString *myNSString = [NSString stringWithUTF8String:(char*)description.vs.filepath.buf];
+ id<MTLLibrary> default_library = [context.device->id newLibraryWithFile:myNSString error:&err];
+ assert(default_library);
+
+ // setup vertex and fragment shaders
+ id<MTLFunction> ren_vert = [default_library newFunctionWithName:@"basic_vertex"];
+ assert(ren_vert);
+ id<MTLFunction> ren_frag = [default_library newFunctionWithName:@"basic_fragment"];
+ assert(ren_frag);
+
+ // create pipeline descriptor
+ @autoreleasepool {
+ NSError *err = 0x0;
+ MTLRenderPipelineDescriptor *pld = [[MTLRenderPipelineDescriptor alloc] init];
+ NSString *pipeline_name = [NSString stringWithUTF8String: description.debug_name];
+ pld.label = pipeline_name;
+ pld.vertexFunction = ren_vert;
+ pld.fragmentFunction = ren_frag;
+ pld.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm;
+ pld.colorAttachments[0].blendingEnabled = YES;
+
+ MTLDepthStencilDescriptor *depthStencilDescriptor = [MTLDepthStencilDescriptor new];
+ depthStencilDescriptor.depthCompareFunction = MTLCompareFunctionLess;
+ depthStencilDescriptor.depthWriteEnabled = YES;
+ pld.depthAttachmentPixelFormat = MTLPixelFormatDepth32Float_Stencil8;
+
+ id<MTLDepthStencilState> depth_descriptor = [context.device->id newDepthStencilStateWithDescriptor:depthStencilDescriptor];
+ // FIXME: state->depth_state = depth_descriptor;
+
+ id<MTLRenderPipelineState> pipeline_state = [context.device->id newRenderPipelineStateWithDescriptor:pld error:&err];
+ TRACE("created renderpipelinestate");
+ pipeline->pipeline_state = pipeline_state;
+
+ }
+
+ return pipeline;
+}
+void gpu_pipeline_destroy(gpu_pipeline* pipeline) {}
+
+// --- Renderpass
+gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {
+ gpu_renderpass* renderpass = renderpass_pool_alloc(&context.gpu_pools.renderpasses, NULL);
+
+ // TODO: Configure based on description
+ // set up render pass
+ context.surface = [context.swapchain->swapchain nextDrawable];
+ MTLRenderPassDescriptor *renderPassDescriptor = [[MTLRenderPassDescriptor alloc] init];
+ MTLRenderPassColorAttachmentDescriptor *cd = renderPassDescriptor.colorAttachments[0];
+ [cd setTexture:context.surface.texture];
+ [cd setLoadAction:MTLLoadActionClear];
+ MTLClearColor clearColor = MTLClearColorMake(0.1, 0.1, 0.0, 1.0);
+ [cd setClearColor:clearColor];
+ [cd setStoreAction:MTLStoreActionStore];
+
+ renderpass->rpass_descriptor = renderPassDescriptor;
+
+ return renderpass;
+}
+
+void gpu_renderpass_destroy(gpu_renderpass* pass) {}
+
+// --- Swapchain
+bool gpu_swapchain_create(gpu_swapchain* out_swapchain) {
+ TRACE("GPU Swapchain creation");
+ CAMetalLayer *swapchain = [CAMetalLayer layer];
+ swapchain.device = context.device->id;
+ swapchain.opaque = YES;
+ out_swapchain->swapchain = swapchain;
+
+ // set swapchain for the window
+ context.metal_window.contentView.layer = swapchain;
+ context.metal_window.contentView.wantsLayer = YES;
+
+ context.swapchain = out_swapchain;
+ return true;
+}
+void gpu_swapchain_destroy(gpu_swapchain* swapchain) {}
+
+// --- Command buffer
+gpu_cmd_encoder gpu_cmd_encoder_create() {
+ id <MTLCommandBuffer> cmd_buffer = [context.command_queue commandBuffer];
+
+ return (gpu_cmd_encoder) {
+ .cmd_buffer = cmd_buffer
+ };
+}
+void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder) {}
+void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder) { /* no-op */ }
+void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass) {
+ DEBUG("Create Render Command Encoder");
+ id<MTLRenderCommandEncoder> render_encoder = [encoder->cmd_buffer renderCommandEncoderWithDescriptor:renderpass->rpass_descriptor];
+ encoder->render_encoder = render_encoder;
+ // [encoder setDepthStencilState:state->depth_state];
+}
+void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder) {}
+void gpu_cmd_encoder_begin_compute() {}
+gpu_cmd_encoder* gpu_get_default_cmd_encoder() {
+ return &context.main_command_buf;
+}
+
+/** @brief Finish recording and return a command buffer that can be submitted to a queue */
+gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder) {}
+
+void gpu_queue_submit(gpu_cmd_buffer* buffer) {}
+
+void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
+ buffer_handle dst, u64 dst_offset, u64 copy_size);
+void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size);
+
+void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
+ u64 dst_offset, u64 copy_size);
+void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst);
+
+void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline) {}
+void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data) {}
+void encode_set_default_settings(gpu_cmd_encoder* encoder) {
+ [encoder->render_encoder setCullMode:MTLCullModeBack];
+}
+void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* vertex_buf = BUFFER_GET(buf);
+ [encoder->render_encoder setVertexBuffer:vertex_buf->id offset:0 atIndex:0];
+}
+void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {}
+void encode_set_bind_group() {}
+void encode_draw(gpu_cmd_encoder* encoder) {}
+void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) {}
+void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {}
+
+buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
+ const void* data) {
+ buffer_handle handle;
+ gpu_buffer* buffer = buffer_pool_alloc(&context.resource_pools->buffers, &handle);
+ buffer->size = size;
+
+ id<MTLBuffer> mtl_vert_buf = [context.device->id newBufferWithBytes:data
+ length: size
+ options:MTLResourceStorageModeShared];
+ return handle;
+}
+void gpu_buffer_destroy(buffer_handle buffer) {}
+void gpu_buffer_upload(const void* data) {}
+
+texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {}
+void gpu_texture_destroy(texture_handle) {}
+void gpu_texture_upload(texture_handle texture, const void* data) {}
+
+bool gpu_backend_begin_frame() {
+ context.main_command_buf.cmd_buffer = [context.command_queue commandBuffer];
+ return true;
+}
+void gpu_backend_end_frame() {}
+void gpu_temp_draw(size_t n_verts) {}
+
+#endif
\ No newline at end of file
diff --git a/src/render/backends/opengl/README.md b/src/render/backends/opengl/README.md
new file mode 100644
index 0000000..f87f5c1
--- /dev/null
+++ b/src/render/backends/opengl/README.md
@@ -0,0 +1 @@
+# TODO
\ No newline at end of file
diff --git a/src/render/backends/opengl/backend_opengl.c b/src/render/backends/opengl/backend_opengl.c
new file mode 100644
index 0000000..70e10d7
--- /dev/null
+++ b/src/render/backends/opengl/backend_opengl.c
@@ -0,0 +1,537 @@
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include "builtin_materials.h"
+#include "colours.h"
+#include "maths.h"
+#include "opengl_helpers.h"
+#include "ral_types.h"
+#define CEL_REND_BACKEND_OPENGL
+#if defined(CEL_REND_BACKEND_OPENGL)
+#include <assert.h>
+#include <stdlib.h>
+
+#include "backend_opengl.h"
+#include "defines.h"
+#include "file.h"
+#include "log.h"
+#include "maths_types.h"
+#include "ral.h"
+
+#include <glad/glad.h>
+#include <glfw3.h>
+
+typedef struct opengl_context {
+ GLFWwindow* window;
+ arena pool_arena;
+ gpu_cmd_encoder command_buffer;
+ gpu_backend_pools gpu_pools;
+ struct resource_pools* resource_pools;
+} opengl_context;
+
+static opengl_context context;
+
+struct GLFWwindow;
+
+bool gpu_backend_init(const char* window_name, struct GLFWwindow* window) {
+ INFO("loading OpenGL backend");
+
+ memset(&context, 0, sizeof(opengl_context));
+ context.window = window;
+
+ size_t pool_buffer_size = 1024 * 1024;
+ context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size);
+
+ backend_pools_init(&context.pool_arena, &context.gpu_pools);
+ context.resource_pools = malloc(sizeof(struct resource_pools));
+ resource_pools_init(&context.pool_arena, context.resource_pools);
+
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
+
+ // glad: load all opengl function pointers
+ if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
+ ERROR("Failed to initialise GLAD \n");
+ return false;
+ }
+
+ glEnable(GL_DEPTH_TEST);
+ glEnable(GL_CULL_FACE);
+
+ return true;
+}
+
+void gpu_backend_shutdown() {}
+
+bool gpu_device_create(gpu_device* out_device) { /* No-op in OpenGL */ return true; }
+void gpu_device_destroy() { /* No-op in OpenGL */ }
+
+// --- Render Pipeline
+gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) {
+ gpu_pipeline* pipeline = pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL);
+
+ // Create shader program
+ u32 shader_id = shader_create_separate(description.vs.filepath.buf, description.fs.filepath.buf);
+ pipeline->shader_id = shader_id;
+
+ // Vertex format
+ pipeline->vertex_desc = description.vertex_desc;
+
+ // Allocate uniform buffers if needed
+ u32 ubo_count = 0;
+ // printf("data layouts %d\n", description.data_layouts_count);
+ for (u32 layout_i = 0; layout_i < description.data_layouts_count; layout_i++) {
+ shader_data_layout sdl = description.data_layouts[layout_i].shader_data_get_layout(NULL);
+ TRACE("Got shader data layout %d's bindings! . found %d", layout_i, sdl.bindings_count);
+
+ for (u32 binding_j = 0; binding_j < sdl.bindings_count; binding_j++) {
+ u32 binding_id = binding_j;
+ assert(binding_id < MAX_PIPELINE_UNIFORM_BUFFERS);
+ shader_binding binding = sdl.bindings[binding_j];
+ if (binding.type == SHADER_BINDING_BYTES) {
+ static u32 s_binding_point = 0;
+ buffer_handle ubo_handle =
+ gpu_buffer_create(binding.data.bytes.size, CEL_BUFFER_UNIFORM, CEL_BUFFER_FLAG_GPU,
+ NULL); // no data right now
+ pipeline->uniform_bindings[ubo_count++] = ubo_handle;
+ gpu_buffer* ubo_buf = BUFFER_GET(ubo_handle);
+
+ i32 blockIndex = glGetUniformBlockIndex(pipeline->shader_id, binding.label);
+ printf("Block index for %s: %d", binding.label, blockIndex);
+ if (blockIndex < 0) {
+ WARN("Couldn't retrieve block index for uniform block '%s'", binding.label);
+ } else {
+ // DEBUG("Retrived block index %d for %s", blockIndex, binding.label);
+ }
+ u32 blocksize;
+ glGetActiveUniformBlockiv(pipeline->shader_id, blockIndex, GL_UNIFORM_BLOCK_DATA_SIZE,
+ &blocksize);
+ printf("\t with size %d bytes\n", blocksize);
+
+ glBindBufferBase(GL_UNIFORM_BUFFER, s_binding_point, ubo_buf->id.ubo);
+ if (blockIndex != GL_INVALID_INDEX) {
+ glUniformBlockBinding(pipeline->shader_id, blockIndex, s_binding_point);
+ }
+ ubo_buf->ubo_binding_point = s_binding_point++;
+ ubo_buf->name = binding.label;
+ assert(s_binding_point < GL_MAX_UNIFORM_BUFFER_BINDINGS);
+ }
+ }
+ }
+ pipeline->uniform_count = ubo_count;
+
+ pipeline->renderpass = description.renderpass;
+ pipeline->wireframe = description.wireframe;
+
+ return pipeline;
+}
+void gpu_pipeline_destroy(gpu_pipeline* pipeline) {}
+
+// --- Renderpass
+gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {
+ gpu_renderpass* renderpass = renderpass_pool_alloc(&context.gpu_pools.renderpasses, NULL);
+ memcpy(&renderpass->description, description, sizeof(gpu_renderpass_desc));
+ bool default_framebuffer = description->default_framebuffer;
+
+ if (!default_framebuffer) {
+ GLuint gl_fbo_id;
+ glGenFramebuffers(1, &gl_fbo_id);
+ renderpass->fbo = gl_fbo_id;
+ } else {
+ renderpass->fbo = OPENGL_DEFAULT_FRAMEBUFFER;
+ assert(!description->has_color_target);
+ assert(!description->has_depth_stencil);
+ }
+ glBindFramebuffer(GL_FRAMEBUFFER, renderpass->fbo);
+
+ if (description->has_color_target && !default_framebuffer) {
+ gpu_texture* colour_attachment = TEXTURE_GET(description->color_target);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ colour_attachment->id, 0);
+ }
+ if (description->has_depth_stencil && !default_framebuffer) {
+ gpu_texture* depth_attachment = TEXTURE_GET(description->depth_stencil);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_attachment->id,
+ 0);
+ }
+
+ if (description->has_depth_stencil && !description->has_color_target) {
+ glDrawBuffer(GL_NONE);
+ glReadBuffer(GL_NONE);
+ }
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0); // reset to default framebuffer
+
+ return renderpass;
+}
+void gpu_renderpass_destroy(gpu_renderpass* pass) { glDeleteFramebuffers(1, &pass->fbo); }
+
+// --- Swapchain
+bool gpu_swapchain_create(gpu_swapchain* out_swapchain) { return true; }
+void gpu_swapchain_destroy(gpu_swapchain* swapchain) {}
+
+// --- Command buffer
+gpu_cmd_encoder gpu_cmd_encoder_create() {
+ gpu_cmd_encoder encoder = { 0 };
+ return encoder;
+}
+void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder) {}
+void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder) {}
+void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass) {
+ // glViewport(0, 0, 1000, 1000);
+ glBindFramebuffer(GL_FRAMEBUFFER, renderpass->fbo);
+ rgba clear_colour = STONE_800;
+ glClearColor(clear_colour.r, clear_colour.g, clear_colour.b, 1.0f);
+ /* glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); */
+ // FIXME: account for both
+ if (renderpass->description.has_depth_stencil) {
+ glClear(GL_DEPTH_BUFFER_BIT);
+ } else {
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ }
+}
+void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder) { glBindFramebuffer(GL_FRAMEBUFFER, 0); }
+void gpu_cmd_encoder_begin_compute() {}
+gpu_cmd_encoder* gpu_get_default_cmd_encoder() { return &context.command_buffer; }
+
+/** @brief Finish recording and return a command buffer that can be submitted to a queue */
+gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder) {}
+
+void gpu_queue_submit(gpu_cmd_buffer* buffer) {}
+
+// --- Data copy commands
+/** @brief Copy data from one buffer to another */
+void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
+ buffer_handle dst, u64 dst_offset, u64 copy_size) {}
+/** @brief Upload CPU-side data as array of bytes to a GPU buffer */
+void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size) {
+ // TODO: finish implementing this
+ gpu_buffer* buf = BUFFER_GET(gpu_buf);
+}
+
+/** @brief Copy data from buffer to buffer using a one time submit command buffer and a wait */
+void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
+ u64 dst_offset, u64 copy_size) {}
+/** @brief Copy data from buffer to an image using a one time submit command buffer */
+void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst) {}
+
+// --- Render commands
+void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline) {
+ encoder->pipeline = pipeline;
+
+ if (pipeline->wireframe) {
+ glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
+ } else {
+ glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
+ }
+
+ // In OpenGL binding a pipeline is more or less equivalent to just setting the shader
+ glUseProgram(pipeline->shader_id);
+}
+void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data) {
+ shader_data_layout sdl = data->shader_data_get_layout(data->data);
+ // printf("Binding %s shader data\n", sdl.name);
+
+ for (u32 i = 0; i < sdl.bindings_count; i++) {
+ shader_binding binding = sdl.bindings[i];
+ /* print_shader_binding(binding); */
+
+ if (binding.type == SHADER_BINDING_BYTES) {
+ buffer_handle b;
+ gpu_buffer* ubo_buf;
+ bool found = false;
+ for (u32 i = 0; i < encoder->pipeline->uniform_count; i++) {
+ b = encoder->pipeline->uniform_bindings[i];
+ ubo_buf = BUFFER_GET(b);
+ assert(ubo_buf->name != NULL);
+ if (strcmp(ubo_buf->name, binding.label) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ ERROR("Couldnt find uniform buffer object!!");
+ }
+
+ i32 blockIndex = glGetUniformBlockIndex(encoder->pipeline->shader_id, binding.label);
+ if (blockIndex < 0) {
+ WARN("Couldn't retrieve block index for uniform block '%s'", binding.label);
+ } else {
+ // DEBUG("Retrived block index %d for %s", blockIndex, binding.label);
+ }
+
+ glBindBuffer(GL_UNIFORM_BUFFER, ubo_buf->id.ubo);
+ glBufferSubData(GL_UNIFORM_BUFFER, 0, ubo_buf->size, binding.data.bytes.data);
+
+ } else if (binding.type == SHADER_BINDING_TEXTURE) {
+ gpu_texture* tex = TEXTURE_GET(binding.data.texture.handle);
+ GLint tex_slot = glGetUniformLocation(encoder->pipeline->shader_id, binding.label);
+ // printf("%d slot \n", tex_slot);
+ if (tex_slot == GL_INVALID_VALUE || tex_slot < 0) {
+ WARN("Invalid binding label for texture %s - couldn't fetch texture slot uniform",
+ binding.label);
+ }
+ glUniform1i(tex_slot, i);
+ glActiveTexture(GL_TEXTURE0 + i);
+ glBindTexture(GL_TEXTURE_2D, tex->id);
+ }
+ }
+}
+void encode_set_default_settings(gpu_cmd_encoder* encoder) {}
+void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* buffer = BUFFER_GET(buf);
+ if (buffer->vao == 0) { // if no VAO for this vertex buffer, create it
+ INFO("Setting up VAO");
+ buffer->vao = opengl_bindcreate_vao(buffer, encoder->pipeline->vertex_desc);
+ }
+ glBindVertexArray(buffer->vao);
+}
+void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* buffer = BUFFER_GET(buf);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer->id.ibo);
+}
+void encode_draw(gpu_cmd_encoder* encoder, u64 count) { glDrawArrays(GL_TRIANGLES, 0, count); }
+void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) {
+ /* printf("Draw %ld indices\n", index_count); */
+ glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, 0);
+}
+void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {}
+
+// --- Buffers
+buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
+ const void* data) {
+ // "allocating" the cpu-side buffer struct
+ buffer_handle handle;
+ gpu_buffer* buffer = buffer_pool_alloc(&context.resource_pools->buffers, &handle);
+ buffer->size = size;
+ buffer->vao = 0; // When we create a new buffer, there will be no VAO.
+
+ // Opengl buffer
+ GLuint gl_buffer_id;
+ glGenBuffers(1, &gl_buffer_id);
+
+ GLenum gl_buf_type;
+ GLenum gl_buf_usage = GL_STATIC_DRAW;
+
+ switch (buf_type) {
+ case CEL_BUFFER_UNIFORM:
+ DEBUG("Creating Uniform buffer");
+ gl_buf_type = GL_UNIFORM_BUFFER;
+ /* gl_buf_usage = GL_DYNAMIC_DRAW; */
+ buffer->id.ubo = gl_buffer_id;
+ break;
+ case CEL_BUFFER_DEFAULT:
+ case CEL_BUFFER_VERTEX:
+ DEBUG("Creating Vertex buffer");
+ gl_buf_type = GL_ARRAY_BUFFER;
+ buffer->id.vbo = gl_buffer_id;
+ break;
+ case CEL_BUFFER_INDEX:
+ DEBUG("Creating Index buffer");
+ gl_buf_type = GL_ELEMENT_ARRAY_BUFFER;
+ buffer->id.ibo = gl_buffer_id;
+ break;
+ default:
+ WARN("Unimplemented gpu_buffer_type provided %s", buffer_type_names[buf_type]);
+ break;
+ }
+ // bind buffer
+ glBindBuffer(gl_buf_type, gl_buffer_id);
+
+ if (data) {
+ TRACE("Upload data (%d bytes) as part of buffer creation", size);
+ glBufferData(gl_buf_type, buffer->size, data, gl_buf_usage);
+ } else {
+ TRACE("Allocating but not uploading (%d bytes)", size);
+ glBufferData(gl_buf_type, buffer->size, NULL, gl_buf_usage);
+ }
+
+ glBindBuffer(gl_buf_type, 0);
+
+ return handle;
+}
+
+void gpu_buffer_destroy(buffer_handle buffer) {}
+void gpu_buffer_upload(const void* data) {}
+
+texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {
+ // "allocating" the cpu-side struct
+ texture_handle handle;
+ gpu_texture* texture = texture_pool_alloc(&context.resource_pools->textures, &handle);
+ DEBUG("Allocated texture with handle %d", handle.raw);
+
+ GLuint gl_texture_id;
+ glGenTextures(1, &gl_texture_id);
+ texture->id = gl_texture_id;
+
+ glBindTexture(GL_TEXTURE_2D, gl_texture_id);
+
+ GLint internal_format =
+ desc.format == CEL_TEXTURE_FORMAT_DEPTH_DEFAULT ? GL_DEPTH_COMPONENT : GL_RGB;
+ GLenum format = desc.format == CEL_TEXTURE_FORMAT_DEPTH_DEFAULT ? GL_DEPTH_COMPONENT : GL_RGBA;
+ GLenum data_type = desc.format == CEL_TEXTURE_FORMAT_DEPTH_DEFAULT ? GL_FLOAT : GL_UNSIGNED_BYTE;
+
+ if (desc.format == CEL_TEXTURE_FORMAT_DEPTH_DEFAULT) {
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
+ } else {
+ // set the texture wrapping parameters
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ // set texture filtering parameters
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ }
+
+ if (data) {
+ glTexImage2D(GL_TEXTURE_2D, 0, internal_format, desc.extents.x, desc.extents.y, 0, format,
+ data_type, data);
+ glGenerateMipmap(GL_TEXTURE_2D);
+ } else {
+ WARN("No image data provided");
+ glTexImage2D(GL_TEXTURE_2D, 0, internal_format, desc.extents.x, desc.extents.y, 0, format,
+ data_type, NULL);
+ }
+
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ return handle;
+}
+
+void gpu_texture_destroy(texture_handle) {}
+void gpu_texture_upload(texture_handle texture, const void* data) {}
+
+// --- Vertex formats
+bytebuffer vertices_as_bytebuffer(arena* a, vertex_format format, vertex_darray* vertices) {}
+
+// --- TEMP
+bool gpu_backend_begin_frame() {
+ glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ return true;
+}
+void gpu_backend_end_frame() {
+ // TODO: Reset all bindings
+ glfwSwapBuffers(context.window);
+}
+void gpu_temp_draw(size_t n_verts) {}
+
+u32 shader_create_separate(const char* vert_shader, const char* frag_shader) {
+ INFO("Load shaders at %s and %s", vert_shader, frag_shader);
+ int success;
+ char info_log[512];
+
+ u32 vertex = glCreateShader(GL_VERTEX_SHADER);
+ const char* vertex_shader_src = string_from_file(vert_shader);
+ if (vertex_shader_src == NULL) {
+ ERROR("EXIT: couldnt load shader");
+ exit(-1);
+ }
+ glShaderSource(vertex, 1, &vertex_shader_src, NULL);
+ glCompileShader(vertex);
+ glGetShaderiv(vertex, GL_COMPILE_STATUS, &success);
+ if (!success) {
+ glGetShaderInfoLog(vertex, 512, NULL, info_log);
+ printf("%s\n", info_log);
+ ERROR("EXIT: vertex shader compilation failed");
+ exit(-1);
+ }
+
+ // fragment shader
+ u32 fragment = glCreateShader(GL_FRAGMENT_SHADER);
+ const char* fragment_shader_src = string_from_file(frag_shader);
+ if (fragment_shader_src == NULL) {
+ ERROR("EXIT: couldnt load shader");
+ exit(-1);
+ }
+ glShaderSource(fragment, 1, &fragment_shader_src, NULL);
+ glCompileShader(fragment);
+ glGetShaderiv(fragment, GL_COMPILE_STATUS, &success);
+ if (!success) {
+ glGetShaderInfoLog(fragment, 512, NULL, info_log);
+ printf("%s\n", info_log);
+ ERROR("EXIT: fragment shader compilation failed");
+ exit(-1);
+ }
+
+ u32 shader_prog;
+ shader_prog = glCreateProgram();
+
+ glAttachShader(shader_prog, vertex);
+ glAttachShader(shader_prog, fragment);
+ glLinkProgram(shader_prog);
+ glDeleteShader(vertex);
+ glDeleteShader(fragment);
+ free((char*)vertex_shader_src);
+ free((char*)fragment_shader_src);
+
+ return shader_prog;
+}
+
+inline void uniform_vec3f(u32 program_id, const char* uniform_name, vec3* value) {
+ glUniform3fv(glGetUniformLocation(program_id, uniform_name), 1, &value->x);
+}
+inline void uniform_f32(u32 program_id, const char* uniform_name, f32 value) {
+ glUniform1f(glGetUniformLocation(program_id, uniform_name), value);
+}
+inline void uniform_i32(u32 program_id, const char* uniform_name, i32 value) {
+ glUniform1i(glGetUniformLocation(program_id, uniform_name), value);
+}
+inline void uniform_mat4f(u32 program_id, const char* uniform_name, mat4* value) {
+ glUniformMatrix4fv(glGetUniformLocation(program_id, uniform_name), 1, GL_FALSE, value->data);
+}
+
+// void clear_screen(vec3 colour) {
+// glClearColor(colour.x, colour.y, colour.z, 1.0f);
+// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+// }
+
+// void texture_data_upload(texture *tex) {
+// printf("Texture name %s\n", tex->name);
+// TRACE("Upload texture data");
+// u32 texture_id;
+// glGenTextures(1, &texture_id);
+// glBindTexture(GL_TEXTURE_2D, texture_id);
+// tex->texture_id = texture_id;
+
+// // set the texture wrapping parameters
+// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+// GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
+// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+// // set texture filtering parameters
+// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+
+// glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex->width, tex->height, 0, tex->channel_type,
+// GL_UNSIGNED_BYTE, tex->image_data);
+// glGenerateMipmap(GL_TEXTURE_2D);
+// DEBUG("Freeing texture image data after uploading to GPU");
+// // stbi_image_free(tex->image_data); // data is on gpu now so we dont need it around
+// }
+
+// void bind_texture(shader s, texture *tex, u32 slot) {
+// // printf("bind texture slot %d with texture id %d \n", slot, tex->texture_id);
+// glActiveTexture(GL_TEXTURE0 + slot);
+// glBindTexture(GL_TEXTURE_2D, tex->texture_id);
+// }
+
+// void bind_mesh_vertex_buffer(void *_backend, mesh *mesh) { glBindVertexArray(mesh->vao); }
+
+// static inline GLenum to_gl_prim_topology(enum cel_primitive_topology primitive) {
+// switch (primitive) {
+// case CEL_PRIMITIVE_TOPOLOGY_TRIANGLE:
+// return GL_TRIANGLES;
+// case CEL_PRIMITIVE_TOPOLOGY_POINT:
+// case CEL_PRIMITIVE_TOPOLOGY_LINE:
+// case CEL_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+// case CEL_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+// case CEL_PRIMITIVE_TOPOLOGY_COUNT:
+// break;
+// }
+// }
+#endif
diff --git a/src/render/backends/opengl/backend_opengl.h b/src/render/backends/opengl/backend_opengl.h
new file mode 100644
index 0000000..8b88cf8
--- /dev/null
+++ b/src/render/backends/opengl/backend_opengl.h
@@ -0,0 +1,68 @@
+#pragma once
+
+#ifdef CEL_REND_BACKEND_OPENGL
+
+#include "defines.h"
+#include "maths_types.h"
+#include "ral.h"
+#include "ral_types.h"
+
+#define MAX_PIPELINE_UNIFORM_BUFFERS 32
+
+#define OPENGL_DEFAULT_FRAMEBUFFER 0
+
+typedef struct gpu_swapchain {
+ u32x2 dimensions;
+} gpu_swapchain;
+typedef struct gpu_device {
+} gpu_device;
+typedef struct gpu_pipeline_layout {
+  void *pad;
+} gpu_pipeline_layout;
+typedef struct gpu_pipeline {
+ u32 shader_id;
+ gpu_renderpass* renderpass;
+ vertex_description vertex_desc;
+ buffer_handle uniform_bindings[MAX_PIPELINE_UNIFORM_BUFFERS];
+ u32 uniform_count;
+ bool wireframe;
+} gpu_pipeline;
+typedef struct gpu_renderpass {
+ u32 fbo;
+ gpu_renderpass_desc description;
+} gpu_renderpass;
+typedef struct gpu_cmd_encoder {
+ gpu_pipeline *pipeline;
+} gpu_cmd_encoder; // Recording
+typedef struct gpu_cmd_buffer {
+ void *pad;
+} gpu_cmd_buffer; // Ready for submission
+
+typedef struct gpu_buffer {
+ union {
+ u32 vbo;
+ u32 ibo;
+ u32 ubo;
+ } id;
+ union {
+ u32 vao;
+    u32 ubo_binding_point;
+ }; // Optional
+ char* name;
+ u64 size;
+} gpu_buffer;
+typedef struct gpu_texture {
+ u32 id;
+ void* pad;
+} gpu_texture;
+
+typedef struct opengl_support {
+} opengl_support;
+
+u32 shader_create_separate(const char *vert_shader, const char *frag_shader);
+
+void uniform_vec3f(u32 program_id, const char *uniform_name, vec3 *value);
+void uniform_f32(u32 program_id, const char *uniform_name, f32 value);
+void uniform_i32(u32 program_id, const char *uniform_name, i32 value);
+void uniform_mat4f(u32 program_id, const char *uniform_name, mat4 *value);
+#endif
diff --git a/src/render/backends/opengl/opengl_helpers.h b/src/render/backends/opengl/opengl_helpers.h
new file mode 100644
index 0000000..41018cb
--- /dev/null
+++ b/src/render/backends/opengl/opengl_helpers.h
@@ -0,0 +1,74 @@
+#if defined(CEL_REND_BACKEND_OPENGL)
+#pragma once
+#include "backend_opengl.h"
+#include "log.h"
+#include "ral.h"
+#include "ral_types.h"
+
+#include <glad/glad.h>
+#include <glfw3.h>
+#include "ral_types.h"
+typedef struct opengl_vertex_attr {
+ u32 count;
+ GLenum data_type;
+} opengl_vertex_attr;
+
+static opengl_vertex_attr format_from_vertex_attr(vertex_attrib_type attr) {
+ switch (attr) {
+ case ATTR_F32:
+ return (opengl_vertex_attr){ .count = 1, .data_type = GL_FLOAT };
+ case ATTR_U32:
+ return (opengl_vertex_attr){ .count = 1, .data_type = GL_UNSIGNED_INT };
+ case ATTR_I32:
+ return (opengl_vertex_attr){ .count = 1, .data_type = GL_INT };
+ case ATTR_F32x2:
+ return (opengl_vertex_attr){ .count = 2, .data_type = GL_FLOAT };
+ case ATTR_U32x2:
+ // return VK_FORMAT_R32G32_UINT;
+ case ATTR_I32x2:
+ // return VK_FORMAT_R32G32_UINT;
+ case ATTR_F32x3:
+ return (opengl_vertex_attr){ .count = 3, .data_type = GL_FLOAT };
+ case ATTR_U32x3:
+ // return VK_FORMAT_R32G32B32_UINT;
+ case ATTR_I32x3:
+ // return VK_FORMAT_R32G32B32_SINT;
+ case ATTR_F32x4:
+ return (opengl_vertex_attr){ .count = 4, .data_type = GL_FLOAT };
+ case ATTR_U32x4:
+ // return VK_FORMAT_R32G32B32A32_UINT;
+ case ATTR_I32x4:
+ return (opengl_vertex_attr){ .count = 4, .data_type = GL_INT };
+ }
+}
+
+static u32 opengl_bindcreate_vao(gpu_buffer* buf, vertex_description desc) {
+ DEBUG("Vertex format name %s", desc.debug_label);
+ // 1. Bind the buffer
+ glBindBuffer(GL_ARRAY_BUFFER, buf->id.vbo);
+ // 2. Create new VAO
+ u32 vao;
+ glGenVertexArrays(1, &vao);
+ glBindVertexArray(vao);
+
+ // Attributes
+ u32 attr_count = desc.attributes_count;
+ printf("N attributes %d\n", attr_count);
+ u64 offset = 0;
+ size_t vertex_size = desc.use_full_vertex_size ? sizeof(vertex) : desc.stride;
+ for (u32 i = 0; i < desc.attributes_count; i++) {
+ opengl_vertex_attr format = format_from_vertex_attr(desc.attributes[i]);
+ glVertexAttribPointer(i, format.count, format.data_type, GL_FALSE, vertex_size, (void*)offset);
+ TRACE(" %d %d %d %d %d %s", i, format.count, format.data_type, vertex_size, offset,
+ desc.attr_names[i]);
+ glEnableVertexAttribArray(i); // nth index
+ size_t this_offset = vertex_attrib_size(desc.attributes[i]);
+ printf("offset total %lld this attr %ld\n", offset, this_offset);
+ offset += this_offset;
+ }
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+
+ return vao;
+}
+
+#endif
diff --git a/src/render/backends/vulkan/README.md b/src/render/backends/vulkan/README.md
new file mode 100644
index 0000000..220ed64
--- /dev/null
+++ b/src/render/backends/vulkan/README.md
@@ -0,0 +1 @@
+# Vulkan Backend Overview
\ No newline at end of file
diff --git a/src/render/backends/vulkan/backend_vulkan.c b/src/render/backends/vulkan/backend_vulkan.c
new file mode 100644
index 0000000..8801230
--- /dev/null
+++ b/src/render/backends/vulkan/backend_vulkan.c
@@ -0,0 +1,1705 @@
+#include "defines.h"
+#if defined(CEL_REND_BACKEND_VULKAN)
+
+#define GLFW_INCLUDE_VULKAN
+#include <glfw3.h>
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vulkan/vk_platform.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+#include "backend_vulkan.h"
+#include "buf.h"
+#include "darray.h"
+#include "maths_types.h"
+#include "mem.h"
+#include "ral_types.h"
+#include "str.h"
+#include "vulkan_helpers.h"
+
+#include "file.h"
+#include "log.h"
+#include "ral.h"
+#include "utils.h"
+
+// TEMP
+#define SCREEN_WIDTH 1000
+#define SCREEN_HEIGHT 1000
+#define VULKAN_QUEUES_COUNT 2
+#define MAX_DESCRIPTOR_SETS 10
+
+const char* queue_names[VULKAN_QUEUES_COUNT] = { "GRAPHICS", "TRANSFER" };
+
+KITC_DECL_TYPED_ARRAY(VkDescriptorSet)
+
+typedef struct vulkan_context {
+ VkInstance instance;
+ VkAllocationCallbacks* allocator;
+ VkSurfaceKHR surface;
+ vulkan_swapchain_support_info swapchain_support;
+
+ arena temp_arena;
+ arena pool_arena;
+ gpu_device* device;
+ gpu_swapchain* swapchain;
+ u32 framebuffer_count;
+ VkFramebuffer*
+ swapchain_framebuffers; // TODO: Move this data into the swapchain as its own struct
+
+ u32 current_img_index;
+ u32 current_frame; // super important
+ gpu_cmd_encoder main_cmd_bufs[MAX_FRAMES_IN_FLIGHT];
+ VkSemaphore image_available_semaphores[MAX_FRAMES_IN_FLIGHT];
+ VkSemaphore render_finished_semaphores[MAX_FRAMES_IN_FLIGHT];
+ VkFence in_flight_fences[MAX_FRAMES_IN_FLIGHT];
+
+ // HACK
+ VkRenderPass main_renderpass;
+
+ u32 screen_width;
+ u32 screen_height;
+ bool is_resizing;
+ GLFWwindow* window;
+
+ // Storage
+ gpu_buffer buffers[1024];
+ size_t buffer_count;
+ VkDescriptorSet_darray* free_set_queue;
+ struct resource_pools* resource_pools;
+ gpu_backend_pools gpu_pools;
+
+ VkDebugUtilsMessengerEXT vk_debugger;
+} vulkan_context;
+
+static vulkan_context context;
+
+// --- Function forward declarations
+
+void backend_pools_init(arena* a, gpu_backend_pools* backend_pools);
+
+/** @brief Enumerates and selects the most appropriate graphics device */
+bool select_physical_device(gpu_device* out_device);
+
+bool is_physical_device_suitable(VkPhysicalDevice device);
+
+queue_family_indices find_queue_families(VkPhysicalDevice device);
+
+bool create_logical_device(gpu_device* out_device);
+void create_swapchain_framebuffers();
+void create_sync_objects();
+void create_descriptor_pools();
+size_t vertex_attrib_size(vertex_attrib_type attr);
+
+VkShaderModule create_shader_module(str8 spirv);
+
+/** @brief Helper function for creating array of all extensions we want */
+cstr_darray* get_all_extensions();
+
+VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat format,
+ VkImageUsageFlags usage);
+void vulkan_transition_image_layout(gpu_texture* texture, VkFormat format, VkImageLayout old_layout,
+ VkImageLayout new_layout);
+
+// --- Handy macros
+#define BUFFER_GET(h) (buffer_pool_get(&context.resource_pools->buffers, h))
+#define TEXTURE_GET(h) (texture_pool_get(&context.resource_pools->textures, h))
+
+bool gpu_backend_init(const char* window_name, GLFWwindow* window) {
+ memset(&context, 0, sizeof(vulkan_context));
+ context.allocator = 0; // TODO: use an allocator
+ context.screen_width = SCREEN_WIDTH;
+ context.screen_height = SCREEN_HEIGHT;
+ context.window = window;
+ context.current_img_index = 0;
+ context.current_frame = 0;
+ context.free_set_queue = VkDescriptorSet_darray_new(100);
+
+ // Create an allocator
+ size_t temp_arena_size = 1024 * 1024;
+ context.temp_arena = arena_create(malloc(temp_arena_size), temp_arena_size);
+
+ size_t pool_buffer_size = 1024 * 1024;
+ context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size);
+
+ backend_pools_init(&context.pool_arena, &context.gpu_pools);
+
+ // Setup Vulkan instance
+ VkApplicationInfo app_info = { VK_STRUCTURE_TYPE_APPLICATION_INFO };
+ app_info.apiVersion = VK_API_VERSION_1_2;
+ app_info.pApplicationName = window_name;
+ app_info.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
+ app_info.pEngineName = "Celeritas Engine";
+ app_info.engineVersion = VK_MAKE_VERSION(1, 0, 0);
+
+ VkInstanceCreateInfo create_info = { VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO };
+ create_info.pApplicationInfo = &app_info;
+
+ // Extensions
+ cstr_darray* required_extensions = cstr_darray_new(2);
+ // cstr_darray_push(required_extensions, VK_KHR_SURFACE_EXTENSION_NAME);
+
+ uint32_t count;
+ const char** extensions = glfwGetRequiredInstanceExtensions(&count);
+ for (u32 i = 0; i < count; i++) {
+ cstr_darray_push(required_extensions, extensions[i]);
+ }
+
+ cstr_darray_push(required_extensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
+
+ DEBUG("Required extensions:");
+ for (u32 i = 0; i < cstr_darray_len(required_extensions); i++) {
+ DEBUG(" %s", required_extensions->data[i]);
+ }
+
+ create_info.enabledExtensionCount = cstr_darray_len(required_extensions);
+ create_info.ppEnabledExtensionNames = required_extensions->data;
+
+ // TODO: Validation layers
+ create_info.enabledLayerCount = 0;
+ create_info.ppEnabledLayerNames = NULL;
+
+ INFO("Validation layers enabled");
+ cstr_darray* desired_validation_layers = cstr_darray_new(1);
+ cstr_darray_push(desired_validation_layers, "VK_LAYER_KHRONOS_validation");
+
+ u32 n_available_layers = 0;
+ VK_CHECK(vkEnumerateInstanceLayerProperties(&n_available_layers, 0));
+ TRACE("%d available layers", n_available_layers);
+ VkLayerProperties* available_layers =
+ arena_alloc(&context.temp_arena, n_available_layers * sizeof(VkLayerProperties));
+ VK_CHECK(vkEnumerateInstanceLayerProperties(&n_available_layers, available_layers));
+
+ for (int i = 0; i < cstr_darray_len(desired_validation_layers); i++) {
+ // look through layers to make sure we can find the ones we want
+ bool found = false;
+ for (int j = 0; j < n_available_layers; j++) {
+ if (str8_equals(str8_cstr_view(desired_validation_layers->data[i]),
+ str8_cstr_view(available_layers[j].layerName))) {
+ found = true;
+ TRACE("Found layer %s", desired_validation_layers->data[i]);
+ break;
+ }
+ }
+
+ if (!found) {
+ FATAL("Required validation is missing %s", desired_validation_layers->data[i]);
+ return false;
+ }
+ }
+ INFO("All validation layers are present");
+ create_info.enabledLayerCount = cstr_darray_len(desired_validation_layers);
+ create_info.ppEnabledLayerNames = desired_validation_layers->data;
+
+ VkResult result = vkCreateInstance(&create_info, NULL, &context.instance);
+ if (result != VK_SUCCESS) {
+ ERROR("vkCreateInstance failed with result: %u", result);
+ return false;
+ }
+ TRACE("Vulkan Instance created");
+
+ DEBUG("Creating Vulkan debugger");
+ u32 log_severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+ VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT
+ };
+ debug_create_info.messageSeverity = log_severity;
+ debug_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+ debug_create_info.pfnUserCallback = vk_debug_callback;
+
+ PFN_vkCreateDebugUtilsMessengerEXT func =
+ (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(context.instance,
+ "vkCreateDebugUtilsMessengerEXT");
+ assert(func);
+ VK_CHECK(func(context.instance, &debug_create_info, context.allocator, &context.vk_debugger));
+ DEBUG("Vulkan Debugger created");
+
+ // Surface creation
+ VkSurfaceKHR surface;
+ VK_CHECK(glfwCreateWindowSurface(context.instance, window, NULL, &surface));
+ context.surface = surface;
+ TRACE("Vulkan Surface created");
+
+ return true;
+}
+
+void gpu_backend_shutdown() {
+ gpu_swapchain_destroy(context.swapchain);
+
+ vkDestroySurfaceKHR(context.instance, context.surface, context.allocator);
+ vkDestroyInstance(context.instance, context.allocator);
+ arena_free_storage(&context.temp_arena);
+}
+
+bool gpu_device_create(gpu_device* out_device) {
+  // First things first, store this pointer from the renderer
+ context.device = out_device;
+
+ arena_save savept = arena_savepoint(&context.temp_arena);
+ // Physical device
+ if (!select_physical_device(out_device)) {
+ return false;
+ }
+ TRACE("Physical device selected");
+
+ // Logical device & Queues
+ create_logical_device(out_device);
+
+ // Create the command pool
+ VkCommandPoolCreateInfo pool_create_info = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO };
+ pool_create_info.queueFamilyIndex = out_device->queue_family_indicies.graphics_family_index;
+ pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ vkCreateCommandPool(out_device->logical_device, &pool_create_info, context.allocator,
+ &out_device->pool);
+ TRACE("Command Pool created");
+
+ // Synchronisation objects
+ create_sync_objects();
+ TRACE("Synchronisation primitives created");
+
+ arena_rewind(savept); // Free any temp data
+ return true;
+}
+
+bool gpu_swapchain_create(gpu_swapchain* out_swapchain) {
+ context.swapchain = out_swapchain;
+
+ out_swapchain->swapchain_arena = arena_create(malloc(1024), 1024);
+
+ vulkan_device_query_swapchain_support(context.device->physical_device, context.surface,
+ &context.swapchain_support);
+ vulkan_swapchain_support_info swapchain_support = context.swapchain_support;
+
+ // TODO: custom swapchain extents VkExtent2D swapchain_extent = { width, height };
+
+ VkSurfaceFormatKHR image_format = choose_swapchain_format(&swapchain_support);
+ out_swapchain->image_format = image_format;
+ VkPresentModeKHR present_mode = VK_PRESENT_MODE_FIFO_KHR; // guaranteed to be implemented
+ out_swapchain->present_mode = present_mode;
+
+ u32 image_count = swapchain_support.capabilities.minImageCount + 1;
+ out_swapchain->image_count = image_count;
+
+ VkSwapchainCreateInfoKHR swapchain_create_info = { VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR };
+ swapchain_create_info.surface = context.surface;
+ swapchain_create_info.minImageCount = image_count;
+ swapchain_create_info.imageFormat = image_format.format;
+ swapchain_create_info.imageColorSpace = image_format.colorSpace;
+ swapchain_create_info.imageExtent = swapchain_support.capabilities.currentExtent;
+ swapchain_create_info.imageArrayLayers = 1;
+ swapchain_create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ swapchain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ swapchain_create_info.queueFamilyIndexCount = 0;
+ swapchain_create_info.pQueueFamilyIndices = NULL;
+
+ swapchain_create_info.preTransform = swapchain_support.capabilities.currentTransform;
+ swapchain_create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ swapchain_create_info.presentMode = present_mode;
+ swapchain_create_info.clipped = VK_TRUE;
+ swapchain_create_info.oldSwapchain = VK_NULL_HANDLE;
+
+ out_swapchain->extent = swapchain_support.capabilities.currentExtent;
+
+ VK_CHECK(vkCreateSwapchainKHR(context.device->logical_device, &swapchain_create_info,
+ context.allocator, &out_swapchain->handle));
+ TRACE("Vulkan Swapchain created");
+
+ // Retrieve Images
+ // out_swapchain->images =
+ // arena_alloc(&out_swapchain->swapchain_arena, image_count * sizeof(VkImage));
+ out_swapchain->images = malloc(image_count * sizeof(VkImage));
+ VK_CHECK(vkGetSwapchainImagesKHR(context.device->logical_device, out_swapchain->handle,
+ &image_count, out_swapchain->images));
+
+ // Create ImageViews
+ // TODO: Move this to a separate function
+ out_swapchain->image_views = malloc(image_count * sizeof(VkImageView));
+ // arena_alloc(&out_swapchain->swapchain_arena, image_count * sizeof(VkImageView));
+ for (u32 i = 0; i < image_count; i++) {
+ VkImageViewCreateInfo view_create_info = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+ view_create_info.image = out_swapchain->images[i];
+ view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ view_create_info.format = image_format.format;
+ view_create_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_create_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_create_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_create_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ view_create_info.subresourceRange.baseMipLevel = 0;
+ view_create_info.subresourceRange.levelCount = 1;
+ view_create_info.subresourceRange.baseArrayLayer = 0;
+ view_create_info.subresourceRange.layerCount = 1;
+ vkCreateImageView(context.device->logical_device, &view_create_info, context.allocator,
+ &out_swapchain->image_views[i]);
+ }
+
+ return true;
+}
+
+void gpu_swapchain_destroy(gpu_swapchain* swapchain) {
+ // Destroy Framebuffers
+ DEBUG("Image count %d", swapchain->image_count);
+ for (u32 i = 0; i < swapchain->image_count; i++) {
+ DEBUG("Framebuffer handle %d", context.swapchain_framebuffers[i]);
+ vkDestroyFramebuffer(context.device->logical_device, context.swapchain_framebuffers[i],
+ context.allocator);
+ }
+ for (u32 i = 0; i < swapchain->image_count; i++) {
+ vkDestroyImageView(context.device->logical_device, swapchain->image_views[i],
+ context.allocator);
+ }
+ arena_free_all(&swapchain->swapchain_arena);
+ vkDestroySwapchainKHR(context.device->logical_device, swapchain->handle, context.allocator);
+ TRACE("Vulkan Swapchain destroyed");
+}
+
+static void recreate_swapchain(gpu_swapchain* swapchain) {
+ int width = 0, height = 0;
+ glfwGetFramebufferSize(context.window, &width, &height);
+ while (width == 0 || height == 0) {
+ glfwGetFramebufferSize(context.window, &width, &height);
+ glfwWaitEvents();
+ }
+ DEBUG("Recreating swapchain...");
+ vkDeviceWaitIdle(context.device->logical_device);
+
+ gpu_swapchain_destroy(swapchain);
+ gpu_swapchain_create(swapchain);
+ create_swapchain_framebuffers();
+}
+
+VkFormat format_from_vertex_attr(vertex_attrib_type attr) {
+ switch (attr) {
+ case ATTR_F32:
+ return VK_FORMAT_R32_SFLOAT;
+ case ATTR_U32:
+ return VK_FORMAT_R32_UINT;
+ case ATTR_I32:
+ return VK_FORMAT_R32_SINT;
+ case ATTR_F32x2:
+ return VK_FORMAT_R32G32_SFLOAT;
+ case ATTR_U32x2:
+ return VK_FORMAT_R32G32_UINT;
+ case ATTR_I32x2:
+      return VK_FORMAT_R32G32_SINT;
+ case ATTR_F32x3:
+ return VK_FORMAT_R32G32B32_SFLOAT;
+ case ATTR_U32x3:
+ return VK_FORMAT_R32G32B32_UINT;
+ case ATTR_I32x3:
+ return VK_FORMAT_R32G32B32_SINT;
+ case ATTR_F32x4:
+ return VK_FORMAT_R32G32B32A32_SFLOAT;
+ case ATTR_U32x4:
+ return VK_FORMAT_R32G32B32A32_UINT;
+ case ATTR_I32x4:
+ return VK_FORMAT_R32G32B32A32_SINT;
+ }
+}
+
+gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) {
+ TRACE("GPU Graphics Pipeline creation");
+ // Allocate
+ gpu_pipeline_layout* layout =
+ pipeline_layout_pool_alloc(&context.gpu_pools.pipeline_layouts, NULL);
+ gpu_pipeline* pipeline = pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL);
+
+ // Shaders
+ printf("Vertex shader: %s\n", description.vs.filepath.buf);
+ printf("Fragment shader: %s\n", description.fs.filepath.buf);
+ VkShaderModule vertex_shader = create_shader_module(description.vs.code);
+ VkShaderModule fragment_shader = create_shader_module(description.fs.code);
+
+ // Vertex
+ VkPipelineShaderStageCreateInfo vert_shader_stage_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
+ };
+ vert_shader_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT;
+ vert_shader_stage_info.module = vertex_shader;
+ vert_shader_stage_info.pName = "main";
+ // Fragment
+ VkPipelineShaderStageCreateInfo frag_shader_stage_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
+ };
+ frag_shader_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+ frag_shader_stage_info.module = fragment_shader;
+ frag_shader_stage_info.pName = "main";
+
+ VkPipelineShaderStageCreateInfo shader_stages[2] = { vert_shader_stage_info,
+ frag_shader_stage_info };
+
+ // Attributes
+ u32 attr_count = description.vertex_desc.attributes_count;
+ printf("N attributes %d\n", attr_count);
+ VkVertexInputAttributeDescription attribute_descs[attr_count];
+ memset(attribute_descs, 0, attr_count * sizeof(VkVertexInputAttributeDescription));
+ u32 offset = 0;
+ for (u32 i = 0; i < description.vertex_desc.attributes_count; i++) {
+ attribute_descs[i].binding = 0;
+ attribute_descs[i].location = i;
+ attribute_descs[i].format = format_from_vertex_attr(description.vertex_desc.attributes[i]);
+ attribute_descs[i].offset = offset;
+ size_t this_offset = vertex_attrib_size(description.vertex_desc.attributes[i]);
+ printf("offset total %d this attr %ld\n", offset, this_offset);
+ printf("sizeof vertex %ld\n", sizeof(vertex));
+ offset += this_offset;
+ }
+
+ // Vertex input
+  // TODO: Generate this from description now
+ VkVertexInputBindingDescription binding_desc;
+ binding_desc.binding = 0;
+ binding_desc.stride = description.vertex_desc.use_full_vertex_size
+ ? sizeof(vertex)
+ : description.vertex_desc.stride;
+ binding_desc.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+
+ VkPipelineVertexInputStateCreateInfo vertex_input_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
+ };
+ vertex_input_info.vertexBindingDescriptionCount = 1;
+ vertex_input_info.pVertexBindingDescriptions = &binding_desc;
+ vertex_input_info.vertexAttributeDescriptionCount =
+ attr_count; // description.vertex_desc.attributes_count;
+ vertex_input_info.pVertexAttributeDescriptions = attribute_descs;
+
+ // Input Assembly
+ VkPipelineInputAssemblyStateCreateInfo input_assembly = {
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
+ };
+ input_assembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+ input_assembly.primitiveRestartEnable = VK_FALSE;
+
+ // Viewport
+ VkViewport viewport = { .x = 0,
+ .y = 0,
+ .width = (f32)context.swapchain->extent.width,
+ .height = (f32)context.swapchain->extent.height,
+ .minDepth = 0.0,
+ .maxDepth = 1.0 };
+ VkRect2D scissor = { .offset = { .x = 0, .y = 0 }, .extent = context.swapchain->extent };
+ VkPipelineViewportStateCreateInfo viewport_state = {
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
+ };
+ viewport_state.viewportCount = 1;
+ // viewport_state.pViewports = &viewport;
+ viewport_state.scissorCount = 1;
+ // viewport_state.pScissors = &scissor;
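+ // Viewport and scissor are left null here because they are dynamic state (see below)
+ // and get set per-frame in encode_set_default_settings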
+
+ // Rasterizer
+ VkPipelineRasterizationStateCreateInfo rasterizer_create_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
+ };
+ rasterizer_create_info.depthClampEnable = VK_FALSE;
+ rasterizer_create_info.rasterizerDiscardEnable = VK_FALSE;
+ rasterizer_create_info.polygonMode =
+ description.wireframe ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL;
+ rasterizer_create_info.lineWidth = 1.0f;
+ rasterizer_create_info.cullMode = VK_CULL_MODE_BACK_BIT;
+ rasterizer_create_info.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ /* rasterizer_create_info.frontFace = VK_FRONT_FACE_CLOCKWISE; */
+ rasterizer_create_info.depthBiasEnable = VK_FALSE;
+ rasterizer_create_info.depthBiasConstantFactor = 0.0;
+ rasterizer_create_info.depthBiasClamp = 0.0;
+ rasterizer_create_info.depthBiasSlopeFactor = 0.0;
+
+ // Multisampling
+ VkPipelineMultisampleStateCreateInfo ms_create_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
+ };
+ ms_create_info.sampleShadingEnable = VK_FALSE;
+ ms_create_info.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+ ms_create_info.minSampleShading = 1.0;
+ ms_create_info.pSampleMask = 0;
+ ms_create_info.alphaToCoverageEnable = VK_FALSE;
+ ms_create_info.alphaToOneEnable = VK_FALSE;
+
+ // TODO: Depth and stencil testing
+ // VkPipelineDepthStencilStateCreateInfo depth_stencil = {
+ // VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO
+ // };
+ // depth_stencil.depthTestEnable = description.depth_test ? VK_TRUE : VK_FALSE;
+ // depth_stencil.depthWriteEnable = description.depth_test ? VK_TRUE : VK_FALSE;
+ // depth_stencil.depthCompareOp = VK_COMPARE_OP_LESS;
+ // depth_stencil.depthBoundsTestEnable = VK_FALSE;
+ // depth_stencil.stencilTestEnable = VK_FALSE;
+ // depth_stencil.pNext = 0;
+
+ // Blending
+ VkPipelineColorBlendAttachmentState color_blend_attachment_state;
+ color_blend_attachment_state.blendEnable = VK_FALSE;
+ color_blend_attachment_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
+ color_blend_attachment_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ color_blend_attachment_state.colorBlendOp = VK_BLEND_OP_ADD;
+ color_blend_attachment_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
+ color_blend_attachment_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ color_blend_attachment_state.alphaBlendOp = VK_BLEND_OP_ADD;
+ color_blend_attachment_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
+ VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+
+ VkPipelineColorBlendStateCreateInfo color_blend = {
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
+ };
+ color_blend.logicOpEnable = VK_FALSE;
+ color_blend.logicOp = VK_LOGIC_OP_COPY;
+ color_blend.attachmentCount = 1;
+ color_blend.pAttachments = &color_blend_attachment_state;
+
+// Dynamic state
+#define DYNAMIC_STATE_COUNT 2
+ VkDynamicState dynamic_states[DYNAMIC_STATE_COUNT] = {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR,
+ };
+
+ VkPipelineDynamicStateCreateInfo dynamic_state = {
+ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
+ };
+ dynamic_state.dynamicStateCount = DYNAMIC_STATE_COUNT;
+ dynamic_state.pDynamicStates = dynamic_states;
+
+ // Descriptor Set layouts
+
+ VkDescriptorSetLayout* desc_set_layouts =
+ malloc(description.data_layouts_count * sizeof(VkDescriptorSetLayout));
+ pipeline->desc_set_layouts = desc_set_layouts;
+ pipeline->desc_set_layouts_count = description.data_layouts_count;
+ if (description.data_layouts_count > 0) {
+ pipeline->uniform_pointers =
+ malloc(description.data_layouts_count * sizeof(desc_set_uniform_buffer));
+ } else {
+ pipeline->uniform_pointers = NULL;
+ }
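+ // uniform_pointers[i] holds the per-frame uniform buffers (and their mapped pointers)
+ // for descriptor set i; encode_bind_shader_data reads them back by group index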
+
+ // assert(description.data_layouts_count == 1);
+ printf("data layouts %d\n", description.data_layouts_count);
+ for (u32 layout_i = 0; layout_i < description.data_layouts_count; layout_i++) {
+ shader_data_layout sdl = description.data_layouts[layout_i].shader_data_get_layout(NULL);
+ TRACE("Got shader data layout %d's bindings! . found %d", layout_i, sdl.bindings_count);
+
+ VkDescriptorSetLayoutBinding desc_set_bindings[sdl.bindings_count];
+
+ // Bindings
+ assert(sdl.bindings_count == 2);
+ for (u32 binding_j = 0; binding_j < sdl.bindings_count; binding_j++) {
+ desc_set_bindings[binding_j].binding = binding_j;
+ desc_set_bindings[binding_j].descriptorCount = 1;
+ switch (sdl.bindings[binding_j].type) {
+ case SHADER_BINDING_BUFFER:
+ case SHADER_BINDING_BYTES:
+ desc_set_bindings[binding_j].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ desc_set_bindings[binding_j].stageFlags =
+ VK_SHADER_STAGE_VERTEX_BIT; // FIXME: don't hardcode
+
+ u64 buffer_size = sdl.bindings[binding_j].data.bytes.size;
+ VkDeviceSize uniform_buf_size = buffer_size;
+ // TODO: Create backing buffer
+
+ VkBuffer buffers[MAX_FRAMES_IN_FLIGHT];
+ VkDeviceMemory uniform_buf_memorys[MAX_FRAMES_IN_FLIGHT];
+ void* uniform_buf_mem_mappings[MAX_FRAMES_IN_FLIGHT];
+ // void* s?
+ for (size_t frame_i = 0; frame_i < MAX_FRAMES_IN_FLIGHT; frame_i++) {
+ buffer_handle uniform_buf_handle =
+ gpu_buffer_create(buffer_size, CEL_BUFFER_UNIFORM, CEL_BUFFER_FLAG_CPU, NULL);
+
+ gpu_buffer* created_gpu_buffer =
+ BUFFER_GET(uniform_buf_handle); // context.buffers[uniform_buf_handle.raw];
+ buffers[frame_i] = created_gpu_buffer->handle;
+ uniform_buf_memorys[frame_i] = created_gpu_buffer->memory;
+ vkMapMemory(context.device->logical_device, uniform_buf_memorys[frame_i], 0,
+ uniform_buf_size, 0, &uniform_buf_mem_mappings[frame_i]);
+ // now we have a pointer in uniform_buf_mem_mappings we can write to
+ }
+
+ desc_set_uniform_buffer uniform_data;
+ memcpy(&uniform_data.buffers, &buffers, sizeof(buffers));
+ memcpy(&uniform_data.uniform_buf_memorys, &uniform_buf_memorys,
+ sizeof(uniform_buf_memorys));
+ memcpy(&uniform_data.uniform_buf_mem_mappings, &uniform_buf_mem_mappings,
+ sizeof(uniform_buf_mem_mappings));
+ uniform_data.size = buffer_size;
+
+ pipeline->uniform_pointers[layout_i] = uniform_data; // indexed by descriptor set (group), matching encode_bind_shader_data
+
+ break;
+ case SHADER_BINDING_TEXTURE:
+ desc_set_bindings[binding_j].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ desc_set_bindings[binding_j].stageFlags =
+ VK_SHADER_STAGE_FRAGMENT_BIT; // FIXME: don't hardcode
+ desc_set_bindings[binding_j].pImmutableSamplers = NULL;
+
+ break;
+ default:
+ ERROR_EXIT("Unimplemented binding type!! in backend_vulkan");
+ }
+ switch (sdl.bindings[binding_j].vis) {
+ case VISIBILITY_VERTEX:
+ desc_set_bindings[binding_j].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+ break;
+ case VISIBILITY_FRAGMENT:
+ desc_set_bindings[binding_j].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+ break;
+ case VISIBILITY_COMPUTE:
+ WARN("Compute is not implemented yet");
+ break;
+ }
+ }
+
+ VkDescriptorSetLayoutCreateInfo desc_set_layout_info = {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
+ };
+ desc_set_layout_info.bindingCount = sdl.bindings_count;
+ desc_set_layout_info.pBindings = desc_set_bindings;
+
+ VK_CHECK(vkCreateDescriptorSetLayout(context.device->logical_device, &desc_set_layout_info,
+ context.allocator, &desc_set_layouts[layout_i]));
+ }
+ printf("Descriptor set layouts\n");
+
+ // Layout
+ VkPipelineLayoutCreateInfo pipeline_layout_create_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
+ };
+ pipeline_layout_create_info.setLayoutCount = description.data_layouts_count;
+ pipeline_layout_create_info.pSetLayouts = desc_set_layouts;
+ pipeline_layout_create_info.pushConstantRangeCount = 0;
+ pipeline_layout_create_info.pPushConstantRanges = NULL;
+ VK_CHECK(vkCreatePipelineLayout(context.device->logical_device, &pipeline_layout_create_info,
+ context.allocator, &layout->handle));
+ pipeline->layout_handle = layout->handle; // keep a copy of the layout on the pipeline object
+
+ VkGraphicsPipelineCreateInfo pipeline_create_info = {
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
+ };
+
+ pipeline_create_info.stageCount = 2;
+ pipeline_create_info.pStages = shader_stages;
+ pipeline_create_info.pVertexInputState = &vertex_input_info;
+ pipeline_create_info.pInputAssemblyState = &input_assembly;
+
+ pipeline_create_info.pViewportState = &viewport_state;
+ pipeline_create_info.pRasterizationState = &rasterizer_create_info;
+ pipeline_create_info.pMultisampleState = &ms_create_info;
+ pipeline_create_info.pDepthStencilState = NULL; // &depth_stencil;
+ pipeline_create_info.pColorBlendState = &color_blend;
+ pipeline_create_info.pDynamicState = &dynamic_state;
+ pipeline_create_info.pTessellationState = 0;
+
+ pipeline_create_info.layout = layout->handle;
+
+ pipeline_create_info.renderPass = description.renderpass->handle;
+ pipeline_create_info.subpass = 0;
+ pipeline_create_info.basePipelineHandle = VK_NULL_HANDLE;
+ pipeline_create_info.basePipelineIndex = -1;
+
+ printf("About to create graphics pipeline\n");
+
+ VkResult result =
+ vkCreateGraphicsPipelines(context.device->logical_device, VK_NULL_HANDLE, 1,
+ &pipeline_create_info, context.allocator, &pipeline->handle);
+ if (result != VK_SUCCESS) {
+ FATAL("graphics pipeline creation failed. its fked mate");
+ ERROR_EXIT("Doomed");
+ }
+ TRACE("Vulkan Graphics pipeline created");
+
+ // once the pipeline has been created we can destroy these
+ vkDestroyShaderModule(context.device->logical_device, vertex_shader, context.allocator);
+ vkDestroyShaderModule(context.device->logical_device, fragment_shader, context.allocator);
+
+ // Framebuffers
+ create_swapchain_framebuffers();
+ TRACE("Swapchain Framebuffers created");
+
+ for (u32 frame_i = 0; frame_i < MAX_FRAMES_IN_FLIGHT; frame_i++) {
+ context.main_cmd_bufs[frame_i] = gpu_cmd_encoder_create();
+ }
+ TRACE("main Command Buffer created");
+
+ TRACE("Graphics pipeline created");
+ return pipeline;
+}
+
+void gpu_pipeline_destroy(gpu_pipeline* pipeline) {
+ vkDestroyPipeline(context.device->logical_device, pipeline->handle, context.allocator);
+ vkDestroyPipelineLayout(context.device->logical_device, pipeline->layout_handle,
+ context.allocator);
+}
+
+gpu_cmd_encoder* gpu_get_default_cmd_encoder() {
+ return &context.main_cmd_bufs[context.current_frame];
+}
+
+gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {
+ gpu_renderpass* renderpass = renderpass_pool_alloc(&context.gpu_pools.renderpasses, NULL);
+
+ // attachments
+ u32 attachment_desc_count = 2;
+ VkAttachmentDescription attachment_descriptions[2];
+
+ // Colour attachment
+ VkAttachmentDescription color_attachment;
+ color_attachment.format = context.swapchain->image_format.format;
+ color_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+ color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ color_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ color_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ color_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ color_attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ color_attachment.flags = 0;
+
+ attachment_descriptions[0] = color_attachment;
+
+ VkAttachmentReference color_attachment_reference;
+ color_attachment_reference.attachment = 0;
+ color_attachment_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ // Depth attachment
+ u32x2 ext = { .x = context.swapchain_support.capabilities.currentExtent.width,
+ .y = context.swapchain_support.capabilities.currentExtent.height };
+ texture_desc depth_desc = { .extents = ext,
+ .format = CEL_TEXTURE_FORMAT_DEPTH_DEFAULT,
+ .tex_type = CEL_TEXTURE_TYPE_2D };
+ texture_handle depth_texture_handle = gpu_texture_create(depth_desc, true, NULL);
+ gpu_texture* depth = TEXTURE_GET(depth_texture_handle);
+
+ VkAttachmentDescription depth_attachment;
+ depth_attachment.format = VK_FORMAT_D32_SFLOAT; // TODO: use context->device.depth_format once it is queried
+ depth_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+ depth_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ depth_attachment.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ depth_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ depth_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ depth_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ depth_attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ depth_attachment.flags = 0;
+
+ attachment_descriptions[1] = depth_attachment;
+
+ VkAttachmentReference depth_attachment_reference;
+ depth_attachment_reference.attachment = 1;
+ depth_attachment_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+ // main subpass
+ VkSubpassDescription subpass = { 0 };
+ subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass.colorAttachmentCount = 1;
+ subpass.pColorAttachments = &color_attachment_reference;
+
+ // Renderpass dependency: make the subpass wait on the colour attachment
+ // output stage before writing to the attachment
+ VkSubpassDependency dependency;
+ dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
+ dependency.dstSubpass = 0;
+ dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ dependency.srcAccessMask = 0;
+ dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ dependency.dstAccessMask =
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ dependency.dependencyFlags = 0;
+
+ // Finally, create the RenderPass
+ VkRenderPassCreateInfo render_pass_create_info = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO };
+ render_pass_create_info.attachmentCount = 1;
+ render_pass_create_info.pAttachments = &color_attachment;
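+ // NOTE: only the colour attachment is attached for now; the depth attachment
+ // prepared above is not yet referenced by the subpass or the framebuffers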
+ render_pass_create_info.subpassCount = 1;
+ render_pass_create_info.pSubpasses = &subpass;
+ render_pass_create_info.dependencyCount = 1;
+ render_pass_create_info.pDependencies = &dependency;
+ render_pass_create_info.flags = 0;
+ render_pass_create_info.pNext = 0;
+
+ VK_CHECK(vkCreateRenderPass(context.device->logical_device, &render_pass_create_info,
+ context.allocator, &renderpass->handle));
+
+ // HACK
+ context.main_renderpass = renderpass->handle;
+
+ return renderpass;
+}
+
+gpu_cmd_encoder gpu_cmd_encoder_create() {
+ // gpu_cmd_encoder* encoder = malloc(sizeof(gpu_cmd_encoder)); // TODO: fix leaking mem
+ gpu_cmd_encoder encoder = { 0 };
+
+ VkCommandBufferAllocateInfo allocate_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO };
+ allocate_info.commandPool = context.device->pool;
+ allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ allocate_info.commandBufferCount = 1;
+ allocate_info.pNext = NULL;
+
+ VK_CHECK(vkAllocateCommandBuffers(context.device->logical_device, &allocate_info,
+ &encoder.cmd_buffer));
+
+ VkDescriptorPoolSize pool_sizes[2];
+ // Uniforms pool
+ pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ pool_sizes[0].descriptorCount = MAX_FRAMES_IN_FLIGHT * MAX_DESCRIPTOR_SETS;
+ // Samplers pool
+ pool_sizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ pool_sizes[1].descriptorCount = MAX_FRAMES_IN_FLIGHT * MAX_DESCRIPTOR_SETS;
+
+ VkDescriptorPoolCreateInfo pool_info = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO };
+ pool_info.poolSizeCount = 2;
+ pool_info.pPoolSizes = pool_sizes;
+ pool_info.maxSets = 100;
+
+ VK_CHECK(vkCreateDescriptorPool(context.device->logical_device, &pool_info, context.allocator,
+ &encoder.descriptor_pool));
+
+ return encoder;
+}
+void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder) {
+ vkFreeCommandBuffers(context.device->logical_device, context.device->pool, 1,
+ &encoder->cmd_buffer);
+}
+
+void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder) {
+ VK_CHECK(vkResetDescriptorPool(context.device->logical_device, encoder.descriptor_pool, 0));
+
+ VkCommandBufferBeginInfo begin_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+ VK_CHECK(vkBeginCommandBuffer(encoder.cmd_buffer, &begin_info));
+}
+
+void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass) {
+ VkRenderPassBeginInfo begin_info = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO };
+ begin_info.renderPass = renderpass->handle;
+ /* printf("Current img: %d Current frame %d\n", context.current_img_index, context.current_frame);
+ */
+ begin_info.framebuffer = context.swapchain_framebuffers[context.current_img_index];
+ begin_info.renderArea.offset = (VkOffset2D){ 0, 0 };
+ begin_info.renderArea.extent = context.swapchain->extent;
+
+ // VkClearValue clear_values[2];
+ VkClearValue clear_color = { { { 0.02f, 0.02f, 0.02f, 1.0f } } };
+ // clear_values[1].depthStencil.depth = renderpass->depth;
+ // clear_values[1].depthStencil.stencil = renderpass->stencil;
+
+ begin_info.clearValueCount = 1;
+ begin_info.pClearValues = &clear_color;
+
+ vkCmdBeginRenderPass(encoder->cmd_buffer, &begin_info, VK_SUBPASS_CONTENTS_INLINE);
+ // command_buffer->state = COMMAND_BUFFER_STATE_IN_RENDER_PASS;
+}
+
+void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder) {
+ vkCmdEndRenderPass(encoder->cmd_buffer);
+}
+
+gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder) {
+ vkEndCommandBuffer(encoder->cmd_buffer);
+
+ // TEMP: submit
+ return (gpu_cmd_buffer){ .cmd_buffer = encoder->cmd_buffer };
+}
+
+// --- Binding
+void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline) {
+ vkCmdBindPipeline(encoder->cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->handle);
+ encoder->pipeline = pipeline;
+}
+
+void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data) {
+ arena tmp = arena_create(malloc(1024), 1024);
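+ // Scratch arena for the descriptor buffer/image info structs; freed once
+ // vkUpdateDescriptorSets has consumed them at the end of this function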
+
+ assert(data->data != NULL);
+
+ // Update the local buffer
+ desc_set_uniform_buffer ubo = encoder->pipeline->uniform_pointers[group];
+ memcpy(ubo.uniform_buf_mem_mappings[context.current_frame], data->data, ubo.size);
+
+ VkDescriptorSetAllocateInfo alloc_info = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO };
+ alloc_info.descriptorPool = encoder->descriptor_pool;
+ alloc_info.descriptorSetCount = 1;
+ alloc_info.pSetLayouts = &encoder->pipeline->desc_set_layouts[group];
+
+ shader_data_layout sdl = data->shader_data_get_layout(data->data);
+ size_t binding_count = sdl.bindings_count;
+ assert(binding_count == 2);
+
+ VkDescriptorSet sets[1]; // one set is allocated below (descriptorSetCount = 1)
+ VK_CHECK(vkAllocateDescriptorSets(context.device->logical_device, &alloc_info, sets));
+ // FIXME: hardcoded
+ VkDescriptorSet_darray_push(context.free_set_queue, sets[0]);
+ /* VkDescriptorSet_darray_push(context.free_set_queue, sets[1]); */
+
+ VkWriteDescriptorSet write_sets[binding_count];
+ memset(&write_sets, 0, binding_count * sizeof(VkWriteDescriptorSet));
+
+ for (u32 i = 0; i < sdl.bindings_count; i++) {
+ shader_binding binding = sdl.bindings[i];
+
+ if (binding.type == SHADER_BINDING_BUFFER || binding.type == SHADER_BINDING_BYTES) {
+ VkDescriptorBufferInfo* buffer_info = arena_alloc(&tmp, sizeof(VkDescriptorBufferInfo));
+ buffer_info->buffer = ubo.buffers[context.current_frame];
+ buffer_info->offset = 0;
+ buffer_info->range = binding.data.bytes.size;
+
+ write_sets[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write_sets[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ write_sets[i].descriptorCount = 1;
+ write_sets[i].dstSet = sets[0];
+ write_sets[i].dstBinding = i;
+ write_sets[i].dstArrayElement = 0;
+ write_sets[i].pBufferInfo = buffer_info;
+ } else if (binding.type == SHADER_BINDING_TEXTURE) {
+ gpu_texture* texture = TEXTURE_GET(binding.data.texture.handle);
+ VkDescriptorImageInfo* image_info = arena_alloc(&tmp, sizeof(VkDescriptorImageInfo));
+ image_info->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ image_info->imageView = texture->view;
+ image_info->sampler = texture->sampler;
+
+ write_sets[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write_sets[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ write_sets[i].descriptorCount = 1;
+ write_sets[i].dstSet = sets[0];
+ write_sets[i].dstBinding = i;
+ write_sets[i].dstArrayElement = 0;
+ write_sets[i].pImageInfo = image_info;
+ } else {
+ WARN("Unknown binding");
+ }
+ }
+
+ // Update
+ vkUpdateDescriptorSets(context.device->logical_device, binding_count, write_sets, 0, NULL);
+
+ // Bind
+ vkCmdBindDescriptorSets(encoder->cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ encoder->pipeline->layout_handle, 0, 1, sets, 0, NULL);
+
+ arena_free_storage(&tmp);
+}
+
+void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* buffer = BUFFER_GET(buf); // context.buffers[buf.raw];
+ VkBuffer vbs[] = { buffer->handle };
+ VkDeviceSize offsets[] = { 0 };
+ vkCmdBindVertexBuffers(encoder->cmd_buffer, 0, 1, vbs, offsets);
+}
+
+void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* buffer = BUFFER_GET(buf); // context.buffers[buf.raw];
+ vkCmdBindIndexBuffer(encoder->cmd_buffer, buffer->handle, 0, VK_INDEX_TYPE_UINT32);
+}
+
+// TEMP
+void encode_set_default_settings(gpu_cmd_encoder* encoder) {
+ VkViewport viewport = { 0 };
+ viewport.x = 0.0f;
+ viewport.y = 0.0f;
+ viewport.width = context.swapchain->extent.width;
+ viewport.height = context.swapchain->extent.height;
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+ vkCmdSetViewport(encoder->cmd_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor = { 0 };
+ scissor.offset = (VkOffset2D){ 0, 0 };
+ scissor.extent = context.swapchain->extent;
+ vkCmdSetScissor(encoder->cmd_buffer, 0, 1, &scissor);
+}
+
+// --- Drawing
+
+bool gpu_backend_begin_frame() {
+ u32 current_frame = context.current_frame;
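+ // Block until the GPU has finished the work previously submitted for this frame slot
+ // (the fence is signalled by the submit in gpu_queue_submit)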
+ vkWaitForFences(context.device->logical_device, 1, &context.in_flight_fences[current_frame],
+ VK_TRUE, UINT64_MAX);
+
+ u32 image_index;
+ VkResult result = vkAcquireNextImageKHR(
+ context.device->logical_device, context.swapchain->handle, UINT64_MAX,
+ context.image_available_semaphores[current_frame], VK_NULL_HANDLE, &image_index);
+ if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR || context.is_resizing) {
+ ERROR("Acquire next image failure. recreate swapchain");
+ context.is_resizing = false;
+ recreate_swapchain(context.swapchain);
+ return false;
+ } else if (result != VK_SUCCESS) {
+ ERROR_EXIT("failed to acquire swapchain image");
+ }
+
+ vkResetFences(context.device->logical_device, 1, &context.in_flight_fences[current_frame]);
+
+ context.current_img_index = image_index;
+ VK_CHECK(vkResetCommandBuffer(context.main_cmd_bufs[current_frame].cmd_buffer, 0));
+ return true;
+}
+
+void gpu_temp_draw(size_t n_indices) {
+ gpu_cmd_encoder* encoder = gpu_get_default_cmd_encoder(); // &context.main_cmd_buf;
+ /* vkCmdDraw(encoder->cmd_buffer, n_verts, 1, 0, 0); */
+ vkCmdDrawIndexed(encoder->cmd_buffer, n_indices, 1, 0, 0, 0);
+}
+
+void gpu_backend_end_frame() {
+ VkPresentInfoKHR present_info = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
+ present_info.waitSemaphoreCount = 1;
+ present_info.pWaitSemaphores = &context.render_finished_semaphores[context.current_frame];
+
+ VkSwapchainKHR swapchains[] = { context.swapchain->handle };
+ present_info.swapchainCount = 1;
+ present_info.pSwapchains = swapchains;
+ present_info.pImageIndices = &context.current_img_index;
+
+ VkResult result = vkQueuePresentKHR(context.device->present_queue, &present_info);
+ if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) {
+ ERROR("Queue present error. recreate swapchain");
+ recreate_swapchain(context.swapchain);
+ return;
+ } else if (result != VK_SUCCESS) {
+ ERROR_EXIT("failed to present swapchain image");
+ }
+ context.current_frame = (context.current_frame + 1) % MAX_FRAMES_IN_FLIGHT;
+
+ /* vkDeviceWaitIdle(context.device->logical_device); */
+}
+
+// TODO: Move into better order in file
+void gpu_queue_submit(gpu_cmd_buffer* buffer) {
+ VkSubmitInfo submit_info = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+
+ // Specify semaphore to wait on
+ VkSemaphore wait_semaphores[] = { context.image_available_semaphores[context.current_frame] };
+ VkPipelineStageFlags wait_stages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT };
+
+ submit_info.waitSemaphoreCount = 1;
+ submit_info.pWaitSemaphores = wait_semaphores;
+ submit_info.pWaitDstStageMask = wait_stages;
+
+ // Specify semaphore to signal when finished executing buffer
+ VkSemaphore signal_semaphores[] = { context.render_finished_semaphores[context.current_frame] };
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = signal_semaphores;
+
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &buffer->cmd_buffer;
+
+ VK_CHECK(vkQueueSubmit(context.device->graphics_queue, 1, &submit_info,
+ context.in_flight_fences[context.current_frame]));
+}
+
+inline void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) {
+ vkCmdDrawIndexed(encoder->cmd_buffer, index_count, 1, 0, 0, 0);
+}
+
+bool select_physical_device(gpu_device* out_device) {
+ u32 physical_device_count = 0;
+ VK_CHECK(vkEnumeratePhysicalDevices(context.instance, &physical_device_count, 0));
+ if (physical_device_count == 0) {
+ FATAL("No devices that support vulkan were found");
+ return false;
+ }
+ TRACE("Number of devices found %d", physical_device_count);
+
+ VkPhysicalDevice* physical_devices =
+ arena_alloc(&context.temp_arena, physical_device_count * sizeof(VkPhysicalDevice));
+ VK_CHECK(vkEnumeratePhysicalDevices(context.instance, &physical_device_count, physical_devices));
+
+ bool found = false;
+ for (u32 device_i = 0; device_i < physical_device_count; device_i++) {
+ if (is_physical_device_suitable(physical_devices[device_i])) {
+ out_device->physical_device = physical_devices[device_i];
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ FATAL("Couldn't find a suitable physical device");
+ return false;
+ }
+
+ vkGetPhysicalDeviceProperties(out_device->physical_device, &out_device->properties);
+ vkGetPhysicalDeviceFeatures(out_device->physical_device, &out_device->features);
+ vkGetPhysicalDeviceMemoryProperties(out_device->physical_device, &out_device->memory);
+
+ return true;
+}
+
+bool is_physical_device_suitable(VkPhysicalDevice device) {
+ VkPhysicalDeviceProperties properties;
+ vkGetPhysicalDeviceProperties(device, &properties);
+
+ VkPhysicalDeviceFeatures features;
+ vkGetPhysicalDeviceFeatures(device, &features);
+
+ VkPhysicalDeviceMemoryProperties memory;
+ vkGetPhysicalDeviceMemoryProperties(device, &memory);
+
+ // TODO: Check against these device properties
+
+ queue_family_indices indices = find_queue_families(device);
+
+ vulkan_device_query_swapchain_support(device, context.surface, &context.swapchain_support);
+
+ return indices.has_graphics && indices.has_present && context.swapchain_support.mode_count > 0 &&
+ context.swapchain_support.format_count > 0;
+}
+
+queue_family_indices find_queue_families(VkPhysicalDevice device) {
+ queue_family_indices indices = { 0 };
+
+ u32 queue_family_count = 0;
+ vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, 0);
+
+ VkQueueFamilyProperties* queue_families =
+ arena_alloc(&context.temp_arena, queue_family_count * sizeof(VkQueueFamilyProperties));
+ vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, queue_families);
+
+ for (u32 q_fam_i = 0; q_fam_i < queue_family_count; q_fam_i++) {
+ // Graphics queue
+ if (queue_families[q_fam_i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ indices.graphics_family_index = q_fam_i;
+ indices.has_graphics = true;
+ }
+
+ VkBool32 present_support = false;
+ vkGetPhysicalDeviceSurfaceSupportKHR(device, q_fam_i, context.surface, &present_support);
+ if (present_support && !indices.has_present) {
+ indices.present_family_index = q_fam_i;
+ indices.has_present = true;
+ }
+ }
+
+ return indices;
+}
+
+bool create_logical_device(gpu_device* out_device) {
+ queue_family_indices indices = find_queue_families(out_device->physical_device);
+ INFO(" %s | %s | %s | %s | %s", bool_str(indices.has_graphics), bool_str(indices.has_present),
+ bool_str(indices.has_compute), bool_str(indices.has_transfer),
+ out_device->properties.deviceName);
+ TRACE("Graphics Family queue index: %d", indices.graphics_family_index);
+ TRACE("Present Family queue index: %d", indices.present_family_index);
+ TRACE("Compute Family queue index: %d", indices.compute_family_index);
+ TRACE("Transfer Family queue index: %d", indices.transfer_family_index);
+
+ // Queues
+ f32 prio_one = 1.0;
+ VkDeviceQueueCreateInfo queue_create_infos[1] = { 0 };
+ queue_create_infos[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ queue_create_infos[0].queueFamilyIndex = indices.graphics_family_index;
+ queue_create_infos[0].queueCount = 1;
+ queue_create_infos[0].pQueuePriorities = &prio_one;
+ queue_create_infos[0].flags = 0;
+ queue_create_infos[0].pNext = 0;
+
+ // queue_create_infos[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ // queue_create_infos[1].queueFamilyIndex = indices.present_family_index;
+ // queue_create_infos[1].queueCount = 1;
+ // queue_create_infos[1].pQueuePriorities = &prio_one;
+ // queue_create_infos[1].flags = 0;
+ // queue_create_infos[1].pNext = 0;
+
+ // Features
+ VkPhysicalDeviceFeatures device_features = { 0 };
+ device_features.samplerAnisotropy = VK_TRUE; // request anisotropy
+
+ // Device itself
+ VkDeviceCreateInfo device_create_info = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
+ device_create_info.queueCreateInfoCount = 1;
+ device_create_info.pQueueCreateInfos = queue_create_infos;
+ device_create_info.pEnabledFeatures = &device_features;
+ device_create_info.enabledExtensionCount = 1;
+ const char* extension_names = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+ device_create_info.ppEnabledExtensionNames = &extension_names;
+
+ // deprecated
+ device_create_info.enabledLayerCount = 0;
+ device_create_info.ppEnabledLayerNames = 0;
+
+ VkResult result = vkCreateDevice(context.device->physical_device, &device_create_info,
+ context.allocator, &context.device->logical_device);
+ if (result != VK_SUCCESS) {
+ printf("error creating logical device with status %u\n", result);
+ ERROR_EXIT("Unable to create vulkan logical device. Exiting..");
+ }
+ TRACE("Logical device created");
+
+ context.device->queue_family_indicies = indices;
+
+ // Retrieve queue handles
+ vkGetDeviceQueue(context.device->logical_device, indices.graphics_family_index, 0,
+ &context.device->graphics_queue);
+ vkGetDeviceQueue(context.device->logical_device, indices.present_family_index, 0,
+ &context.device->present_queue);
+
+ return true;
+}
+
+VkShaderModule create_shader_module(str8 spirv) {
+ VkShaderModuleCreateInfo create_info = { VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO };
+ create_info.codeSize = spirv.len;
+ create_info.pCode = (uint32_t*)spirv.buf;
+
+ VkShaderModule shader_module;
+ VK_CHECK(vkCreateShaderModule(context.device->logical_device, &create_info, context.allocator,
+ &shader_module));
+
+ return shader_module;
+}
+
+void create_descriptor_pools() {}
+
+void create_swapchain_framebuffers() {
+ WARN("Recreating framebuffers...");
+ u32 image_count = context.swapchain->image_count;
+ context.swapchain_framebuffers =
+ arena_alloc(&context.swapchain->swapchain_arena, image_count * sizeof(VkFramebuffer));
+ for (u32 i = 0; i < image_count; i++) {
+ VkImageView attachments[1] = { context.swapchain->image_views[i] };
+
+ VkFramebufferCreateInfo framebuffer_create_info = { VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO };
+ framebuffer_create_info.attachmentCount = 1;
+ framebuffer_create_info.pAttachments = attachments;
+
+ framebuffer_create_info.renderPass =
+ context.main_renderpass; // TODO: description.renderpass->handle;
+ framebuffer_create_info.width = context.swapchain->extent.width;
+ framebuffer_create_info.height = context.swapchain->extent.height;
+ framebuffer_create_info.layers = 1;
+
+ VK_CHECK(vkCreateFramebuffer(context.device->logical_device, &framebuffer_create_info,
+ context.allocator, &context.swapchain_framebuffers[i]));
+ }
+}
+
+void create_sync_objects() {
+ VkSemaphoreCreateInfo semaphore_info = { VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
+ VkFenceCreateInfo fence_info = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
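+ // Create fences pre-signalled so the first frame's vkWaitForFences returns immediately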
+ fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
+
+ for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) {
+ VK_CHECK(vkCreateSemaphore(context.device->logical_device, &semaphore_info, context.allocator,
+ &context.image_available_semaphores[i]));
+ VK_CHECK(vkCreateSemaphore(context.device->logical_device, &semaphore_info, context.allocator,
+ &context.render_finished_semaphores[i]));
+
+ VK_CHECK(vkCreateFence(context.device->logical_device, &fence_info, context.allocator,
+ &context.in_flight_fences[i]));
+ }
+}
+
+static i32 find_memory_index(u32 type_filter, u32 property_flags) {
+ VkPhysicalDeviceMemoryProperties memory_properties;
+ vkGetPhysicalDeviceMemoryProperties(context.device->physical_device, &memory_properties);
+
+ for (u32 i = 0; i < memory_properties.memoryTypeCount; ++i) {
+ // Check each memory type to see if its bit is set to 1.
+ if (type_filter & (1 << i) &&
+ (memory_properties.memoryTypes[i].propertyFlags & property_flags) == property_flags) {
+ return i;
+ }
+ }
+
+ WARN("Unable to find suitable memory type!");
+ return -1;
+}
+
+buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
+ const void* data) {
+ VkBufferCreateInfo buffer_info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ buffer_info.size = size;
+ buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
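+ // every buffer can act as a transfer source/destination so the staging upload path below works for any type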
+
+ switch (buf_type) {
+ case CEL_BUFFER_DEFAULT:
+ buffer_info.usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+ break;
+ case CEL_BUFFER_VERTEX:
+ buffer_info.usage |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ break;
+ case CEL_BUFFER_INDEX:
+ buffer_info.usage |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ break;
+ case CEL_BUFFER_UNIFORM:
+ buffer_info.usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ break;
+ case CEL_BUFFER_COUNT:
+ WARN("Incorrect gpu_buffer_type provided. using default");
+ break;
+ }
+
+ buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+
+ // "allocating" the cpu-side buffer struct
+ /* gpu_buffer buffer; */
+ /* buffer.size = size; */
+ buffer_handle handle;
+ gpu_buffer* buffer = buffer_pool_alloc(&context.resource_pools->buffers, &handle);
+ buffer->size = size;
+
+ VK_CHECK(vkCreateBuffer(context.device->logical_device, &buffer_info, context.allocator,
+ &buffer->handle));
+
+ VkMemoryRequirements requirements;
+ vkGetBufferMemoryRequirements(context.device->logical_device, buffer->handle, &requirements);
+
+ // For now always request DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT memory
+ i32 memory_index =
+ find_memory_index(requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
+
+ // Allocate the actual VRAM
+ VkMemoryAllocateInfo allocate_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ allocate_info.allocationSize = requirements.size;
+ allocate_info.memoryTypeIndex = (u32)memory_index;
+
+ vkAllocateMemory(context.device->logical_device, &allocate_info, context.allocator,
+ &buffer->memory);
+ vkBindBufferMemory(context.device->logical_device, buffer->handle, buffer->memory, 0);
+
+ /* Now there are two options:
+ * 1. create CPU-accessible memory -> map memory -> memcpy -> unmap
+ * 2. use a staging buffer thats CPU-accessible and copy its contents to a
+ * GPU-only buffer
+ */
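+ // The CEL_BUFFER_FLAG_CPU branch below takes option 1; CEL_BUFFER_FLAG_GPU takes
+ // option 2 via a temporary staging buffer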
+
+ /* context.buffers[context.buffer_count] = buffer; */
+ /* context.buffer_count++; */
+
+ if (data) {
+ TRACE("Upload data as part of buffer creation");
+ if (flags & CEL_BUFFER_FLAG_CPU) {
+ // map memory -> copy data in -> unmap memory
+ buffer_upload_bytes(handle, (bytebuffer){ .buf = (u8*)data, .size = size }, 0, size);
+ } else if (flags & CEL_BUFFER_FLAG_GPU) {
+ TRACE("Uploading data to buffer using staging buffer");
+ // Create a staging buffer
+ buffer_handle staging = gpu_buffer_create(size, buf_type, CEL_BUFFER_FLAG_CPU, NULL);
+
+ // Copy data into it
+ buffer_upload_bytes(staging, (bytebuffer){ .buf = (u8*)data, .size = size }, 0, size);
+
+ // Enqueue a copy from the staging buffer into the DEVICE_LOCAL buffer
+ gpu_cmd_encoder temp_encoder = gpu_cmd_encoder_create();
+ gpu_cmd_encoder_begin(temp_encoder);
+ encode_buffer_copy(&temp_encoder, staging, 0, handle, 0, size);
+ gpu_cmd_buffer copy_cmd_buffer = gpu_cmd_encoder_finish(&temp_encoder);
+
+ VkSubmitInfo submit_info = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &temp_encoder.cmd_buffer;
+ vkQueueSubmit(context.device->graphics_queue, 1, &submit_info, VK_NULL_HANDLE);
+
+ // Cleanup
+ vkQueueWaitIdle(context.device->graphics_queue);
+ gpu_cmd_encoder_destroy(&temp_encoder);
+ gpu_buffer_destroy(staging);
+ }
+ }
+
+ return handle;
+}
+
+void gpu_buffer_destroy(buffer_handle buffer) {
+ gpu_buffer* b = buffer_pool_get(&context.resource_pools->buffers, buffer);
+ vkDestroyBuffer(context.device->logical_device, b->handle, context.allocator);
+ vkFreeMemory(context.device->logical_device, b->memory, context.allocator);
+ buffer_pool_dealloc(&context.resource_pools->buffers, buffer);
+}
+
+ // Upload data to a GPU buffer by mapping its memory, copying the bytes in, and unmapping
+void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size) {
+ gpu_buffer* buffer = buffer_pool_get(&context.resource_pools->buffers, gpu_buf);
+ void* data_ptr;
+ vkMapMemory(context.device->logical_device, buffer->memory, 0, size, 0, &data_ptr);
+ DEBUG("Uploading %d bytes to buffer", size);
+ memcpy(data_ptr, cpu_buf.buf, size);
+ vkUnmapMemory(context.device->logical_device, buffer->memory);
+}
+
+void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
+ buffer_handle dst, u64 dst_offset, u64 copy_size) {
+ VkBufferCopy copy_region;
+ copy_region.srcOffset = src_offset;
+ copy_region.dstOffset = dst_offset;
+ copy_region.size = copy_size;
+
+ gpu_buffer* src_buf = buffer_pool_get(&context.resource_pools->buffers, src);
+ gpu_buffer* dst_buf = buffer_pool_get(&context.resource_pools->buffers, dst);
+ vkCmdCopyBuffer(encoder->cmd_buffer, src_buf->handle, dst_buf->handle, 1, &copy_region);
+}
+
+// one-shot command buffers
+VkCommandBuffer vulkan_command_buffer_create_oneshot() {
+ VkCommandBufferAllocateInfo alloc_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO };
+ alloc_info.commandPool = context.device->pool;
+ alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ alloc_info.commandBufferCount = 1;
+ alloc_info.pNext = 0;
+
+ VkCommandBuffer cmd_buffer;
+ vkAllocateCommandBuffers(context.device->logical_device, &alloc_info, &cmd_buffer);
+
+ VkCommandBufferBeginInfo begin_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+ begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+
+ vkBeginCommandBuffer(cmd_buffer, &begin_info);
+
+ return cmd_buffer;
+}
+
+void vulkan_command_buffer_finish_oneshot(VkCommandBuffer cmd_buffer) {
+ VK_CHECK(vkEndCommandBuffer(cmd_buffer));
+
+ // submit to queue
+ VkSubmitInfo submit_info = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &cmd_buffer;
+ VK_CHECK(vkQueueSubmit(context.device->graphics_queue, 1, &submit_info, 0));
+ VK_CHECK(vkQueueWaitIdle(context.device->graphics_queue));
+
+ vkFreeCommandBuffers(context.device->logical_device, context.device->pool, 1, &cmd_buffer);
+}
+
+void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
+ u64 dst_offset, u64 copy_size) {
+ VkBufferCopy copy_region;
+ copy_region.srcOffset = src_offset;
+ copy_region.dstOffset = dst_offset;
+ copy_region.size = copy_size;
+
+ gpu_buffer* src_buf = buffer_pool_get(&context.resource_pools->buffers, src);
+ gpu_buffer* dst_buf = buffer_pool_get(&context.resource_pools->buffers, dst);
+ VkCommandBuffer temp_cmd_buffer = vulkan_command_buffer_create_oneshot();
+ vkCmdCopyBuffer(temp_cmd_buffer, src_buf->handle, dst_buf->handle, 1, &copy_region);
+ vulkan_command_buffer_finish_oneshot(temp_cmd_buffer);
+}
+
+void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst) {
+ gpu_buffer* src_buf = buffer_pool_get(&context.resource_pools->buffers, src);
+ gpu_texture* dst_tex = texture_pool_get(&context.resource_pools->textures, dst);
+
+ VkCommandBuffer temp_cmd_buffer = vulkan_command_buffer_create_oneshot();
+
+ VkBufferImageCopy region;
+ region.bufferOffset = 0;
+ region.bufferRowLength = 0;
+ region.bufferImageHeight = 0;
+ region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ region.imageSubresource.mipLevel = 0;
+ region.imageSubresource.baseArrayLayer = 0;
+ region.imageSubresource.layerCount = 1;
+ printf("Image details width: %d height %d\n", dst_tex->desc.extents.x, dst_tex->desc.extents.y);
+ region.imageOffset.x = 0;
+ region.imageOffset.y = 0;
+ region.imageOffset.z = 0;
+ region.imageExtent.width = dst_tex->desc.extents.x;
+ region.imageExtent.height = dst_tex->desc.extents.y;
+ region.imageExtent.depth = 1;
+
+ vkCmdCopyBufferToImage(temp_cmd_buffer, src_buf->handle, dst_tex->handle,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+
+ vulkan_command_buffer_finish_oneshot(temp_cmd_buffer);
+}
+
+VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat format,
+ VkImageUsageFlags usage) {
+ VkImage image;
+
+ VkImageCreateInfo image_create_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+ image_create_info.imageType = VK_IMAGE_TYPE_2D;
+ image_create_info.extent.width = dimensions.x;
+ image_create_info.extent.height = dimensions.y;
+ image_create_info.extent.depth = 1;
+ image_create_info.mipLevels = 1;
+ image_create_info.arrayLayers = 1;
+ image_create_info.format = format;
+ image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
+ image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ image_create_info.usage = usage; // VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
+
+ VK_CHECK(
+ vkCreateImage(context.device->logical_device, &image_create_info, context.allocator, &image));
+
+ return image;
+}
+
+texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {
+ VkDeviceSize image_size = desc.extents.x * desc.extents.y * 4;
+ // FIXME: handle this properly
+ VkFormat format = desc.format == CEL_TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM ? VK_FORMAT_R8G8B8A8_SRGB
+ : VK_FORMAT_D32_SFLOAT;
+
+ VkImage image; // vulkan_image_create(desc.extents, VK_IMAGE_TYPE_2D, format,
+ // VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
+ VkDeviceMemory image_memory;
+
+ VkImageCreateInfo image_create_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+ image_create_info.imageType = VK_IMAGE_TYPE_2D;
+ image_create_info.extent.width = desc.extents.x;
+ image_create_info.extent.height = desc.extents.y;
+ image_create_info.extent.depth = 1;
+ image_create_info.mipLevels = 1;
+ image_create_info.arrayLayers = 1;
+ image_create_info.format = format;
+ image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
+ image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (format == VK_FORMAT_D32_SFLOAT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+ image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
+
+ VK_CHECK(
+ vkCreateImage(context.device->logical_device, &image_create_info, context.allocator, &image));
+
+ VkMemoryRequirements memory_reqs;
+ vkGetImageMemoryRequirements(context.device->logical_device, image, &memory_reqs);
+
+ VkMemoryAllocateInfo alloc_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ alloc_info.allocationSize = memory_reqs.size;
+ alloc_info.memoryTypeIndex =
+ find_memory_index(memory_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
+ vkAllocateMemory(context.device->logical_device, &alloc_info, context.allocator, &image_memory);
+
+ vkBindImageMemory(context.device->logical_device, image, image_memory, 0);
+
+ texture_handle handle;
+ gpu_texture* texture = texture_pool_alloc(&context.resource_pools->textures, &handle);
+ DEBUG("Allocated texture with handle %d", handle.raw);
+ texture->handle = image;
+ texture->debug_label = "Test Texture";
+ texture->desc = desc;
+ texture->memory = image_memory;
+ texture->size = image_size;
+
+ if (data) {
+ TRACE("Uploading pixel data to texture using staging buffer");
+ // Create a staging buffer
+ buffer_handle staging =
+ gpu_buffer_create(image_size, CEL_BUFFER_DEFAULT, CEL_BUFFER_FLAG_CPU, NULL);
+ // Copy data into it
+ vulkan_transition_image_layout(texture, format, VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ buffer_upload_bytes(staging, (bytebuffer){ .buf = (u8*)data, .size = image_size }, 0,
+ image_size);
+ copy_buffer_to_image_oneshot(staging, handle);
+ vulkan_transition_image_layout(texture, format, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+
+ gpu_buffer_destroy(staging);
+ }
+
+ // Texture View
+ if (create_view) {
+ VkImageViewCreateInfo view_create_info = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+ view_create_info.image = image;
+ view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ view_create_info.format = format;
+ view_create_info.subresourceRange.aspectMask =
+ format == VK_FORMAT_D32_SFLOAT ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
+
+ view_create_info.subresourceRange.baseMipLevel = 0;
+ view_create_info.subresourceRange.levelCount = 1;
+ view_create_info.subresourceRange.baseArrayLayer = 0;
+ view_create_info.subresourceRange.layerCount = 1;
+
+ VK_CHECK(vkCreateImageView(context.device->logical_device, &view_create_info, context.allocator,
+ &texture->view));
+ }
+
+ // Sampler
+ VkSamplerCreateInfo sampler_info = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
+ sampler_info.magFilter = VK_FILTER_LINEAR;
+ sampler_info.minFilter = VK_FILTER_LINEAR;
+ sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.anisotropyEnable = VK_TRUE;
+ sampler_info.maxAnisotropy = 16;
+ sampler_info.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
+ sampler_info.unnormalizedCoordinates = VK_FALSE;
+ sampler_info.compareEnable = VK_FALSE;
+ sampler_info.compareOp = VK_COMPARE_OP_ALWAYS;
+ sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ sampler_info.mipLodBias = 0.0;
+ sampler_info.minLod = 0.0;
+ sampler_info.maxLod = 0.0;
+
+ VkResult res = vkCreateSampler(context.device->logical_device, &sampler_info, context.allocator,
+ &texture->sampler);
+ if (res != VK_SUCCESS) {
+ ERROR("Error creating texture sampler for image %s", texture->debug_label);
+ exit(1);
+ }
+
+ return handle;
+}
+
+void vulkan_transition_image_layout(gpu_texture* texture, VkFormat format, VkImageLayout old_layout,
+ VkImageLayout new_layout) {
+ VkCommandBuffer temp_cmd_buffer = vulkan_command_buffer_create_oneshot();
+
+ VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
+ barrier.oldLayout = old_layout;
+ barrier.newLayout = new_layout;
+ barrier.srcQueueFamilyIndex = context.device->queue_family_indicies.graphics_family_index;
+ barrier.dstQueueFamilyIndex = context.device->queue_family_indicies.graphics_family_index;
+ barrier.image = texture->handle;
+ barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ barrier.subresourceRange.baseMipLevel = 0;
+ barrier.subresourceRange.levelCount = 1;
+ barrier.subresourceRange.baseArrayLayer = 0;
+ barrier.subresourceRange.layerCount = 1;
+ barrier.srcAccessMask = 0; // TODO
+ barrier.dstAccessMask = 0; // TODO
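+ // (both access masks are set below based on the specific old/new layout pair)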
+
+ VkPipelineStageFlags source_stage;
+ VkPipelineStageFlags dest_stage;
+
+ if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
+ new_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
+ barrier.srcAccessMask = 0;
+ barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ source_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ dest_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ } else if (old_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
+ new_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
+ barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ source_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dest_stage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ } else {
+ FATAL("Unsupported image layout transition");
+ return;
+ }
+
+ vkCmdPipelineBarrier(temp_cmd_buffer, source_stage, dest_stage, 0, 0, 0, 0, 0, 1, &barrier);
+
+ vulkan_command_buffer_finish_oneshot(temp_cmd_buffer);
+}
+
+/* TYPED_POOL(gpu_buffer, buffer); */
+/* TYPED_POOL(gpu_texture, texture); */
+
+/* void resource_pools_init(arena* a, struct resource_pools* res_pools) { */
+/* buffer_pool buf_pool = buffer_pool_create(a, MAX_BUFFERS, sizeof(gpu_buffer)); */
+/* res_pools->buffers = buf_pool; */
+/* texture_pool tex_pool = texture_pool_create(a, MAX_TEXTURES, sizeof(gpu_texture)); */
+/* res_pools->textures = tex_pool; */
+
+/* context.resource_pools = res_pools; */
+/* } */
+
+#endif
diff --git a/src/render/backends/vulkan/backend_vulkan.h b/src/render/backends/vulkan/backend_vulkan.h
new file mode 100644
index 0000000..6ca0bb5
--- /dev/null
+++ b/src/render/backends/vulkan/backend_vulkan.h
@@ -0,0 +1,118 @@
+#pragma once
+#include "defines.h"
+#if defined(CEL_REND_BACKEND_VULKAN)
+#include <vulkan/vk_platform.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+#include "mem.h"
+#include "ral.h"
+#include "ral_types.h"
+
+#define MAX_FRAMES_IN_FLIGHT 2
+#define GPU_SWAPCHAIN_IMG_COUNT 2
+
+/*
+Conventions:
+ - Place the 'handle' as the first field of a struct
+ - Vulkan specific data goes at the top, followed by our internal data
+*/
+
+typedef struct queue_family_indices {
+ u32 graphics_family_index;
+ u32 present_family_index;
+ u32 compute_family_index;
+ u32 transfer_family_index;
+ bool has_graphics;
+ bool has_present;
+ bool has_compute;
+ bool has_transfer;
+} queue_family_indices;
+
+// typedef struct vulkan_framebuffer {
+// } vulkan_framebuffer;
+
+typedef struct gpu_swapchain {
+ VkSwapchainKHR handle;
+ arena swapchain_arena;
+ VkExtent2D extent;
+ u32x2 dimensions;
+ VkSurfaceFormatKHR image_format;
+ VkPresentModeKHR present_mode;
+ u32 image_count;
+ VkImage* images;
+ VkImageView* image_views;
+} gpu_swapchain;
+
+typedef struct gpu_device {
+ // In Vulkan we store both physical and logical device here
+ VkPhysicalDevice physical_device;
+ VkDevice logical_device;
+ VkPhysicalDeviceProperties properties;
+ VkPhysicalDeviceFeatures features;
+ VkPhysicalDeviceMemoryProperties memory;
+ queue_family_indices queue_family_indicies;
+ VkQueue graphics_queue;
+ VkQueue present_queue;
+ VkQueue compute_queue;
+ VkQueue transfer_queue;
+ VkCommandPool pool;
+} gpu_device;
+
+typedef struct gpu_pipeline_layout {
+ VkPipelineLayout handle;
+} gpu_pipeline_layout;
+
+typedef struct desc_set_uniform_buffer {
+ VkBuffer buffers[MAX_FRAMES_IN_FLIGHT];
+ VkDeviceMemory uniform_buf_memorys[MAX_FRAMES_IN_FLIGHT];
+ void* uniform_buf_mem_mappings[MAX_FRAMES_IN_FLIGHT];
+ size_t size;
+} desc_set_uniform_buffer;
+
+typedef struct gpu_pipeline {
+ VkPipeline handle;
+ VkPipelineLayout layout_handle;
+
+ // Descriptor gubbins
+ shader_data data_layouts[MAX_SHADER_DATA_LAYOUTS];
+ u32 data_layouts_count;
+
+ VkDescriptorSetLayout* desc_set_layouts;
+ // Based on group, we know which data to load
+ desc_set_uniform_buffer* uniform_pointers;
+ u32 desc_set_layouts_count;
+
+} gpu_pipeline;
+
+typedef struct gpu_renderpass {
+ VkRenderPass handle;
+ // TODO: Where to store framebuffers? VkFramebuffer framebuffers[GPU_SWAPCHAIN_IMG_COUNT];
+} gpu_renderpass;
+
+typedef struct gpu_cmd_encoder {
+ VkCommandBuffer cmd_buffer;
+ VkDescriptorPool descriptor_pool;
+ gpu_pipeline* pipeline;
+} gpu_cmd_encoder;
+
+typedef struct gpu_cmd_buffer {
+ VkCommandBuffer cmd_buffer;
+} gpu_cmd_buffer;
+
+typedef struct gpu_buffer {
+ VkBuffer handle;
+ VkDeviceMemory memory;
+ u64 size;
+} gpu_buffer;
+
+typedef struct gpu_texture {
+ VkImage handle;
+ VkDeviceMemory memory;
+ u64 size;
+ texture_desc desc;
+ VkImageView view;
+ VkSampler sampler;
+ char* debug_label;
+} gpu_texture;
+#endif \ No newline at end of file
diff --git a/src/render/backends/vulkan/vulkan_glossary.md b/src/render/backends/vulkan/vulkan_glossary.md
new file mode 100644
index 0000000..4214f9d
--- /dev/null
+++ b/src/render/backends/vulkan/vulkan_glossary.md
@@ -0,0 +1,18 @@
+# Vulkan Glossary
+
+*from https://vkguide.dev/docs/introduction/vulkan_execution/*
+
+- **VkInstance**: The Vulkan context, used to access drivers.
+- **VkPhysicalDevice**: A GPU. Used to query physical GPU details, like features, capabilities, memory size, etc.
+- **VkDevice**: The “logical” GPU context that you actually execute things on.
+- **VkBuffer**: A chunk of GPU visible memory.
+- **VkImage**: A texture you can write to and read from.
+- **VkPipeline**: Holds the state of the gpu needed to draw. For example: shaders, rasterization options, depth settings.
+- **VkRenderPass**: Holds information about the images you are rendering into. All drawing commands have to be done inside a renderpass. Only used in legacy vkguide.
+- **VkFrameBuffer**: Holds the target images for a renderpass. Only used in legacy vkguide.
+- **VkCommandBuffer**: Encodes GPU commands. All execution that is performed on the GPU itself (not in the driver) has to be encoded in a VkCommandBuffer.
+- **VkQueue**: Execution “port” for commands. GPUs will have a set of queues with different properties. Some allow only graphics commands, others only allow memory commands, etc. Command buffers are executed by submitting them into a queue, which will copy the rendering commands onto the GPU for execution.
+- **VkDescriptorSet**: Holds the binding information that connects shader inputs to data such as VkBuffer resources and VkImage textures. Think of it as a set of gpu-side pointers that you bind once.
+- **VkSwapchainKHR**: Holds the images for the screen. It allows you to render things into a visible window. The KHR suffix shows that it comes from an extension, which in this case is VK_KHR_swapchain.
+- **VkSemaphore**: Synchronizes GPU to GPU execution of commands. Used for syncing multiple command buffer submissions one after another.
+- **VkFence**: Synchronizes GPU to CPU execution of commands. Used to know if a command buffer has finished being executed on the GPU.
diff --git a/src/render/backends/vulkan_helpers.h b/src/render/backends/vulkan_helpers.h
new file mode 100644
index 0000000..23666c6
--- /dev/null
+++ b/src/render/backends/vulkan_helpers.h
@@ -0,0 +1,199 @@
+#pragma once
+
+#include <assert.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+#include "darray.h"
+#include "defines.h"
+#include "log.h"
+#include "str.h"
+
+#define VULKAN_PHYS_DEVICE_MAX_EXTENSION_NAMES 36
+
+DECL_TYPED_ARRAY(const char*, cstr)
+
+static void plat_get_required_extension_names(cstr_darray* extensions) {
+#ifdef CEL_PLATFORM_LINUX
+ cstr_darray_push(extensions, "VK_KHR_xcb_surface");
+#endif
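+ // other platforms would push their own surface extension here (e.g. VK_KHR_win32_surface on Windows)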
+}
+
+// TODO(omni): port to using internal assert functions
+#define VK_CHECK(vulkan_expr) \
+ do { \
+ VkResult res = vulkan_expr; \
+ if (res != VK_SUCCESS) { \
+ ERROR_EXIT("Vulkan error: %u (%s:%d)", res, __FILE__, __LINE__); \
+ } \
+ } while (0)
+
+// TODO: typedef struct vk_debugger {} vk_debugger;
+
+typedef struct vulkan_physical_device_requirements {
+ bool graphics;
+ bool present;
+ bool compute;
+ bool transfer;
+ str8 device_ext_names[VULKAN_PHYS_DEVICE_MAX_EXTENSION_NAMES];
+ size_t device_ext_name_count;
+ bool sampler_anistropy;
+ bool discrete_gpu;
+} vulkan_physical_device_requirements;
+
+#define VULKAN_MAX_DEFAULT 32
+
+typedef struct vulkan_swapchain_support_info {
+ VkSurfaceCapabilitiesKHR capabilities;
+ VkSurfaceFormatKHR formats[VULKAN_MAX_DEFAULT];
+ u32 format_count;
+ VkPresentModeKHR present_modes[VULKAN_MAX_DEFAULT];
+ u32 mode_count;
+} vulkan_swapchain_support_info;
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vk_debug_callback(
+ VkDebugUtilsMessageSeverityFlagBitsEXT severity, VkDebugUtilsMessageTypeFlagsEXT flags,
+ const VkDebugUtilsMessengerCallbackDataEXT* callback_data, void* user_data);
+
+static void vulkan_device_query_swapchain_support(VkPhysicalDevice device, VkSurfaceKHR surface,
+ vulkan_swapchain_support_info* out_support_info) {
+ // TODO: add VK_CHECK to these calls!
+
+ // Surface capabilities
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface, &out_support_info->capabilities);
+
+ // Surface formats
+ vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &out_support_info->format_count,
+ 0); // Get number of formats
+ if (out_support_info->format_count > 0) {
+ vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &out_support_info->format_count,
+ out_support_info->formats);
+ }
+
+ // Present Modes
+ vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &out_support_info->mode_count,
+                                              0);  // Get number of present modes
+ if (out_support_info->mode_count > 0) {
+ vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &out_support_info->mode_count,
+ out_support_info->present_modes);
+ }
+}
+
+static VkSurfaceFormatKHR choose_swapchain_format(
+ vulkan_swapchain_support_info* swapchain_support) {
+ assert(swapchain_support->format_count > 0);
+ // find a format
+ for (u32 i = 0; i < swapchain_support->format_count; i++) {
+ VkSurfaceFormatKHR format = swapchain_support->formats[i];
+ if (format.format == VK_FORMAT_B8G8R8A8_SRGB &&
+ format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
+ return format;
+ }
+ }
+ return swapchain_support->formats[0];
+}
+
+// static bool physical_device_meets_requirements(
+// VkPhysicalDevice device, VkSurfaceKHR surface, const VkPhysicalDeviceProperties* properties,
+// const VkPhysicalDeviceFeatures* features,
+// const vulkan_physical_device_requirements* requirements,
+// vulkan_physical_device_queue_family_info* out_queue_info,
+// vulkan_swapchain_support_info* out_swapchain_support) {
+// // TODO: pass in an arena
+
+// out_queue_info->graphics_family_index = -1;
+// out_queue_info->present_family_index = -1;
+// out_queue_info->compute_family_index = -1;
+// out_queue_info->transfer_family_index = -1;
+
+// if (requirements->discrete_gpu) {
+// if (properties->deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) {
+// TRACE("Device is not a physical GPU. Skipping.");
+// return false;
+// }
+// }
+
+// u32 queue_family_count = 0;
+// vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, 0);
+// VkQueueFamilyProperties queue_families[queue_family_count];
+// vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, queue_families);
+
+// INFO("Graphics | Present | Compute | Transfer | Name");
+// u8 min_transfer_score = 255;
+// for (u32 i = 0; i < queue_family_count; i++) {
+// u8 current_transfer_score = 0;
+
+// // Graphics queue
+// if (queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+// out_queue_info->graphics_family_index = i;
+// current_transfer_score++;
+// }
+
+// // Compute queue
+// if (queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
+// out_queue_info->compute_family_index = i;
+// current_transfer_score++;
+// }
+
+// // Transfer queue
+// if (queue_families[i].queueFlags & VK_QUEUE_TRANSFER_BIT) {
+// // always take the lowest score transfer index
+// if (current_transfer_score <= min_transfer_score) {
+// min_transfer_score = current_transfer_score;
+// out_queue_info->transfer_family_index = i;
+// }
+// }
+
+// // Present Queue
+// VkBool32 supports_present = VK_FALSE;
+// vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &supports_present);
+// if (supports_present) {
+// out_queue_info->present_family_index = i;
+// }
+// }
+
+// INFO(" %d | %d | %d | %d | %s",
+// out_queue_info->graphics_family_index != -1, out_queue_info->present_family_index != -1,
+// out_queue_info->compute_family_index != -1, out_queue_info->transfer_family_index != -1,
+// properties->deviceName);
+// TRACE("Graphics Family queue index: %d", out_queue_info->graphics_family_index);
+// TRACE("Present Family queue index: %d", out_queue_info->present_family_index);
+// TRACE("Compute Family queue index: %d", out_queue_info->compute_family_index);
+// TRACE("Transfer Family queue index: %d", out_queue_info->transfer_family_index);
+
+// if ((!requirements->graphics ||
+// (requirements->graphics && out_queue_info->graphics_family_index != -1))) {
+// INFO("Physical device meets our requirements! Proceed.");
+
+// vulkan_device_query_swapchain_support(
+// device, surface, out_swapchain_support
+
+// // TODO: error handling i.e. format count = 0 or present mode = 0
+
+// );
+// return true;
+// }
+
+// return false;
+// }
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vk_debug_callback(
+ VkDebugUtilsMessageSeverityFlagBitsEXT severity, VkDebugUtilsMessageTypeFlagsEXT flags,
+ const VkDebugUtilsMessengerCallbackDataEXT* callback_data, void* user_data) {
+ switch (severity) {
+ default:
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+ ERROR("%s", callback_data->pMessage);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+ WARN("%s", callback_data->pMessage);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+ INFO("%s", callback_data->pMessage);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
+ TRACE("%s", callback_data->pMessage);
+ break;
+ }
+ return VK_FALSE;
+} \ No newline at end of file
diff --git a/src/render/bind_group_layouts.h b/src/render/bind_group_layouts.h
new file mode 100644
index 0000000..246d1ef
--- /dev/null
+++ b/src/render/bind_group_layouts.h
@@ -0,0 +1,30 @@
+/**
+ * @file bind_group_layouts.h
+ * @author your name (you@domain.com)
+ * @brief Common bindgroups (descriptor set layouts)
+ * @version 0.1
+ * @date 2024-04-28
+ *
+ * @copyright Copyright (c) 2024
+ *
+ */
+#pragma once
+#include "defines.h"
+#include "maths_types.h"
+
+// Three major sets
+
+// 1. Scene / Global
+typedef struct bg_globals {
+ mat4 view;
+ mat4 projection;
+ f32 total_time;
+ f32 delta_time;
+} bg_globals;
+
+// 2. Material (once per object)
+
+// 3. Per draw call
+typedef struct bg_model {
+ mat4 model;
+} bg_model;
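+
+// A minimal sketch (not part of the engine API) of filling the per-frame globals each frame;
+// `cam_view`, `cam_proj` and `dt` are assumed to come from the camera/timing systems:
+//   bg_globals globals = { .view = cam_view, .projection = cam_proj,
+//                          .total_time = (f32)glfwGetTime(), .delta_time = dt };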
diff --git a/src/render/builtin_materials.h b/src/render/builtin_materials.h
new file mode 100644
index 0000000..f2db5f4
--- /dev/null
+++ b/src/render/builtin_materials.h
@@ -0,0 +1,154 @@
+/**
+ * @file builtin_materials.h
+ * @author your name (you@domain.com)
+ * @brief
+ * @version 0.1
+ * @date 2024-06-15
+ *
+ * @copyright Copyright (c) 2024
+ *
+ */
+#pragma once
+
+#include <assert.h>
+#include "defines.h"
+#include "ral_types.h"
+
+// Currently supported materials
+// - Blinn Phong (textured)
+// - PBR (params)
+// - PBR (textured)
+
+// Thoughts
+// --------
+//
+// A material and a shader are inextricably linked: the material provides the shader's input data.
+// However, a shader may require more than just a material (e.g. per-frame scene globals).
+
+// --- Common uniform blocks
+
+/* In glsl code we call it 'MVP_Matrices' */
+typedef struct mvp_matrix_uniforms {
+ mat4 model;
+ mat4 view;
+ mat4 projection;
+} mvp_matrix_uniforms;
+
+// --- PBR (params)
+
+typedef struct pbr_params_material_uniforms {
+ vec3 albedo;
+ f32 metallic;
+ f32 roughness;
+ f32 ao;
+ f32 pad[2];
+} pbr_params_material_uniforms;
+
+typedef struct pbr_point_light {
+ vec3 pos;
+ f32 pad;
+ vec3 color;
+ f32 pad2;
+} pbr_point_light;
+
+typedef struct pbr_params_light_uniforms {
+ pbr_point_light pointLights[4];
+ vec4 viewPos;
+} pbr_params_light_uniforms;
+
+typedef struct pbr_params_bindgroup {
+ mvp_matrix_uniforms mvp_matrices;
+ pbr_params_material_uniforms material;
+ pbr_params_light_uniforms lights;
+} pbr_params_bindgroup;
+
+static shader_data_layout pbr_params_shader_layout(void* data) {
+ pbr_params_bindgroup* d = (pbr_params_bindgroup*)data;
+ bool has_data = data != NULL;
+
+ shader_binding b1 = { .label = "MVP_Matrices",
+ .type = SHADER_BINDING_BYTES,
+ .stores_data = has_data,
+ .data = { .bytes = { .size = sizeof(mvp_matrix_uniforms) } } };
+
+ shader_binding b2 = { .label = "PBR_Params",
+ .type = SHADER_BINDING_BYTES,
+ .stores_data = has_data,
+ .data = { .bytes = { .size = sizeof(pbr_params_material_uniforms) } } };
+
+ shader_binding b3 = { .label = "Scene_Lights",
+ .type = SHADER_BINDING_BYTES,
+ .stores_data = has_data,
+ .data = { .bytes = { .size = sizeof(pbr_params_light_uniforms) } } };
+
+ if (has_data) {
+ // printf("Size %d \n", b3.data.bytes.size);
+ b1.data.bytes.data = &d->mvp_matrices;
+ b2.data.bytes.data = &d->material;
+ /* d->lights.viewPos = vec3(0, 1, 0); */
+ b3.data.bytes.data = &d->lights;
+ // print_vec3(d->lights.viewPos);
+ }
+
+  return (shader_data_layout){ .name = "pbr_params",
+                               .bindings = { b1, b2, b3 },
+                               .bindings_count = 3 };
+}
+
+static void* shader_layout_get_binding(shader_data_layout* layout, u32 nth_binding) {
+ assert(nth_binding < layout->bindings_count);
+ return &layout->bindings[nth_binding].data;
+}
+
+typedef struct pbr_textures {
+ texture_handle albedo_tex;
+ texture_handle metal_roughness_tex;
+ texture_handle ao_tex;
+ texture_handle normal_tex;
+} pbr_textures;
+
+typedef struct pbr_textured_bindgroup {
+ mvp_matrix_uniforms mvp_matrices;
+ pbr_params_light_uniforms lights;
+ pbr_textures textures;
+} pbr_textured_bindgroup;
+
+static shader_data_layout pbr_textured_shader_layout(void* data) {
+ pbr_textured_bindgroup* d = (pbr_textured_bindgroup*)data;
+ bool has_data = data != NULL;
+
+ shader_binding b1 = { .label = "MVP_Matrices",
+ .type = SHADER_BINDING_BYTES,
+ .stores_data = has_data,
+ .data = { .bytes = { .size = sizeof(mvp_matrix_uniforms) } } };
+
+ shader_binding b2 = { .label = "Scene_Lights",
+ .type = SHADER_BINDING_BYTES,
+ .stores_data = has_data,
+ .data = { .bytes = { .size = sizeof(pbr_params_light_uniforms) } } };
+
+ shader_binding b3 = {.label = "albedoMap",
+ .type = SHADER_BINDING_TEXTURE,
+ .stores_data = has_data };
+ shader_binding b4 = {.label = "metallicRoughnessMap",
+ .type = SHADER_BINDING_TEXTURE,
+ .stores_data = has_data };
+ shader_binding b5 = {.label = "aoMap",
+ .type = SHADER_BINDING_TEXTURE,
+ .stores_data = has_data };
+ shader_binding b6 = {.label = "normalMap",
+ .type = SHADER_BINDING_TEXTURE,
+ .stores_data = has_data };
+
+ if (has_data) {
+ b1.data.bytes.data = &d->mvp_matrices;
+ b2.data.bytes.data = &d->lights;
+ b3.data.texture.handle = d->textures.albedo_tex;
+ b4.data.texture.handle = d->textures.metal_roughness_tex;
+ b5.data.texture.handle = d->textures.ao_tex;
+ b6.data.texture.handle = d->textures.normal_tex;
+ }
+
+  return (shader_data_layout){ .name = "pbr_textured",
+                               .bindings = { b1, b2, b3, b4, b5, b6 },
+                               .bindings_count = 6 };
+}
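+
+// Usage sketch (mirrors how other layouts in this repo are consumed; `enc` and `my_bindgroup`
+// are assumed to exist):
+//   shader_data pbr_data = { .data = NULL, .shader_data_get_layout = &pbr_params_shader_layout };
+//   // ... later, at draw time:
+//   pbr_data.data = &my_bindgroup;
+//   encode_bind_shader_data(enc, 0, &pbr_data);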
diff --git a/src/render/immediate.c b/src/render/immediate.c
new file mode 100644
index 0000000..63a62b8
--- /dev/null
+++ b/src/render/immediate.c
@@ -0,0 +1,46 @@
+#include "immediate.h"
+#include "glad/glad.h"
+#include "maths.h"
+#include "primitives.h"
+#include "ral_types.h"
+#include "render.h"
+#include "render_types.h"
+
+typedef struct immdraw_system {
+ // primitive meshes (get reused for each draw call)
+ mesh plane;
+ mesh cube;
+ mesh sphere;
+ // command lists
+
+} immdraw_system;
+
+bool immdraw_system_init(immdraw_system* state) {
+ geometry_data plane_geometry = geo_create_plane(f32x2(1, 1));
+ state->plane = mesh_create(&plane_geometry, true);
+
+ geometry_data cube_geometry = geo_create_cuboid(f32x3(1, 1, 1));
+ state->cube = mesh_create(&cube_geometry, true);
+
+ geometry_data sphere_geometry = geo_create_uvsphere(1.0, 48, 48);
+ state->sphere = mesh_create(&sphere_geometry, true);
+
+ return true;
+}
+
+void immdraw_plane(vec3 pos, quat rotation, f32 u_scale, f32 v_scale, vec4 colour) {}
+
+void immdraw_system_render(immdraw_system* state) {}
+
+// void imm_draw_sphere(vec3 pos, f32 radius, vec4 colour) {
+// // Create the vertices
+// geometry_data geometry = geo_create_uvsphere(radius, 16, 16);
+// geo_set_vertex_colours(&geometry, colour);
+
+// // Upload to GPU
+// mat4 model = mat4_translation(pos);
+
+// // Set pipeline
+
+// // Draw
+// } \ No newline at end of file
diff --git a/src/render/immediate.h b/src/render/immediate.h
new file mode 100644
index 0000000..f4b1729
--- /dev/null
+++ b/src/render/immediate.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "geometry.h"
+#include "maths_types.h"
+
+typedef struct immdraw_system immdraw_system;
+
+bool immdraw_system_init(immdraw_system* state);
+void immdraw_system_shutdown(immdraw_system* state);
+void immdraw_system_render(immdraw_system* state);
+
+// 3. SIMA (simplified immediate mode API) / render.h
+// - don't need to worry about uploading mesh data
+// - very useful for debugging
+void immdraw_plane(vec3 pos, quat rotation, f32 u_scale, f32 v_scale, vec4 colour);
+void immdraw_cuboid(vec3 pos, quat rotation, f32x3 extents, vec4 colour);
+void immdraw_sphere(vec3 pos, f32 radius, vec4 colour);
+
+void immdraw_camera_frustum();
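+
+// Usage sketch (assumes the vec3/vec4 constructor helpers from the maths headers):
+//   immdraw_sphere(vec3(0, 1, 0), 0.5, vec4(1, 0, 0, 1));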
diff --git a/src/render/ral.c b/src/render/ral.c
new file mode 100644
index 0000000..9ca99ce
--- /dev/null
+++ b/src/render/ral.c
@@ -0,0 +1,97 @@
+#include "ral.h"
+#include "file.h"
+#include "log.h"
+#include "mem.h"
+#include "str.h"
+
+#if defined(CEL_REND_BACKEND_VULKAN)
+#include "backend_vulkan.h"
+#elif defined(CEL_REND_BACKEND_METAL)
+#include "backend_metal.h"
+#elif defined(CEL_REND_BACKEND_OPENGL)
+#include "backend_opengl.h"
+#endif
+
+size_t vertex_attrib_size(vertex_attrib_type attr) {
+ switch (attr) {
+ case ATTR_F32:
+ case ATTR_U32:
+ case ATTR_I32:
+ return 4;
+ case ATTR_F32x2:
+ case ATTR_U32x2:
+ case ATTR_I32x2:
+ return 8;
+ case ATTR_F32x3:
+ case ATTR_U32x3:
+ case ATTR_I32x3:
+ return 12;
+ case ATTR_F32x4:
+ case ATTR_U32x4:
+ case ATTR_I32x4:
+ return 16;
+ }
+}
+
+void vertex_desc_add(vertex_description* builder, const char* name, vertex_attrib_type type) {
+ u32 i = builder->attributes_count;
+
+ size_t size = vertex_attrib_size(type);
+ builder->attributes[i] = type;
+ builder->stride += size;
+ builder->attr_names[i] = name;
+
+ builder->attributes_count++;
+}
+
+vertex_description static_3d_vertex_description() {
+ vertex_description builder = { .debug_label = "Standard static 3d vertex format" };
+ vertex_desc_add(&builder, "inPosition", ATTR_F32x3);
+ vertex_desc_add(&builder, "inNormal", ATTR_F32x3);
+ vertex_desc_add(&builder, "inTexCoords", ATTR_F32x2);
+ builder.use_full_vertex_size = true;
+ return builder;
+}
+
+void backend_pools_init(arena* a, gpu_backend_pools* backend_pools) {
+ pipeline_layout_pool pipeline_layout_pool =
+ pipeline_layout_pool_create(a, MAX_PIPELINES, sizeof(gpu_pipeline_layout));
+ backend_pools->pipeline_layouts = pipeline_layout_pool;
+ pipeline_pool pipeline_pool = pipeline_pool_create(a, MAX_PIPELINES, sizeof(gpu_pipeline));
+ backend_pools->pipelines = pipeline_pool;
+ renderpass_pool rpass_pool = renderpass_pool_create(a, MAX_RENDERPASSES, sizeof(gpu_renderpass));
+ backend_pools->renderpasses = rpass_pool;
+
+ // context.gpu_pools;
+}
+
+void resource_pools_init(arena* a, struct resource_pools* res_pools) {
+ buffer_pool buf_pool = buffer_pool_create(a, MAX_BUFFERS, sizeof(gpu_buffer));
+ res_pools->buffers = buf_pool;
+ texture_pool tex_pool = texture_pool_create(a, MAX_TEXTURES, sizeof(gpu_texture));
+ res_pools->textures = tex_pool;
+
+ // context.resource_pools = res_pools;
+}
+
+void print_shader_binding(shader_binding b) {
+ printf("Binding name: %s type %s vis %d stores data %d\n", b.label,
+ shader_binding_type_name[b.type], b.vis, b.stores_data);
+}
+
+shader_desc shader_quick_load(const char* filepath) {
+ arena a = arena_create(malloc(1024 * 1024), 1024 * 1024);
+ str8 path = str8_cstr_view(filepath);
+ str8_opt shader = str8_from_file(&a, path);
+ if (!shader.has_value) {
+ ERROR_EXIT("Failed to load shaders from disk");
+ }
+
+ return (shader_desc){
+ .debug_name = filepath,
+ .code = shader.contents,
+ .filepath = path,
+ .is_spirv = true,
+ };
+}
diff --git a/src/render/ral.h b/src/render/ral.h
new file mode 100644
index 0000000..792bb4e
--- /dev/null
+++ b/src/render/ral.h
@@ -0,0 +1,198 @@
+/**
+ * @file ral.h
+ * @author your name (you@domain.com)
+ * @brief Render Abstraction Layer
+ * @details API that a graphics backend *must* implement
+ * @version 0.1
+ * @date 2024-03-31
+ *
+ * @copyright Copyright (c) 2024
+ *
+ */
+#pragma once
+
+#include "buf.h"
+#include "defines.h"
+#include "mem.h"
+#include "ral_types.h"
+#include "str.h"
+
+// Unrelated forward declares
+struct GLFWwindow;
+
+// Forward declare structs - these must be defined in the backend implementation
+typedef struct gpu_swapchain gpu_swapchain;
+typedef struct gpu_device gpu_device;
+typedef struct gpu_pipeline_layout gpu_pipeline_layout;
+typedef struct gpu_pipeline gpu_pipeline;
+typedef struct gpu_renderpass gpu_renderpass;
+typedef struct gpu_cmd_encoder gpu_cmd_encoder; // Recording
+typedef struct gpu_cmd_buffer gpu_cmd_buffer; // Ready for submission
+typedef struct gpu_buffer gpu_buffer;
+typedef struct gpu_texture gpu_texture;
+
+// #define MAX_SHADER_DATA_LAYOUTS 5
+// #define MAX_BUFFERS 256
+// #define MAX_TEXTURES 256
+// #define MAX_PIPELINES 128
+// #define MAX_RENDERPASSES 128
+
+// TYPED_POOL(gpu_buffer, buffer);
+// TYPED_POOL(gpu_texture, texture);
+
+// TYPED_POOL(gpu_pipeline_layout, pipeline_layout);
+// TYPED_POOL(gpu_pipeline, pipeline);
+// TYPED_POOL(gpu_renderpass, renderpass);
+
+// // --- Handy macros
+// #define BUFFER_GET(h) (buffer_pool_get(&context.resource_pools->buffers, h))
+// #define TEXTURE_GET(h) (texture_pool_get(&context.resource_pools->textures, h))
+
+// --- Pools
+// typedef struct gpu_backend_pools {
+// pipeline_pool pipelines;
+// pipeline_layout_pool pipeline_layouts;
+// renderpass_pool renderpasses;
+// } gpu_backend_pools;
+// void backend_pools_init(arena* a, gpu_backend_pools* backend_pools);
+
+// struct resource_pools {
+// buffer_pool buffers;
+// texture_pool textures;
+// };
+// void resource_pools_init(arena* a, struct resource_pools* res_pools);
+
+// --- Pipeline description
+typedef enum pipeline_kind {
+ PIPELINE_GRAPHICS,
+ PIPELINE_COMPUTE,
+} pipeline_kind;
+
+typedef struct shader_desc {
+ const char* debug_name;
+ str8 filepath; // Where it came from
+ str8 code; // Either GLSL or SPIRV bytecode
+ bool is_spirv;
+ bool is_combined_vert_frag; // Contains both vertex and fragment stages
+} shader_desc;
+
+shader_desc shader_quick_load(const char* filepath);
+/** @brief Hot reloads shaders for the given pipeline. Returns how long it took in milliseconds */
+u64 gpu_pipeline_reload_shaders(gpu_pipeline* pipeline); // TODO
+
+struct graphics_pipeline_desc {
+ const char* debug_name;
+ vertex_description vertex_desc;
+ shader_desc vs; /** @brief Vertex shader stage */
+ shader_desc fs; /** @brief Fragment shader stage */
+
+  // Each entry is roughly equivalent to a descriptor set layout. Each layout can have multiple
+  // bindings, for example:
+  // - a uniform buffer representing the view-projection matrix
+ // - texture for shadow map
+ shader_data data_layouts[MAX_SHADER_DATA_LAYOUTS];
+ u32 data_layouts_count;
+
+ // gpu_pipeline_layout* layout;
+ gpu_renderpass* renderpass;
+
+ bool wireframe;
+ bool depth_test;
+};
+
+typedef struct gpu_renderpass_desc {
+ bool default_framebuffer;
+ bool has_color_target;
+ texture_handle color_target; // for now only support one
+ bool has_depth_stencil;
+ texture_handle depth_stencil;
+} gpu_renderpass_desc;
+
+// --- Lifecycle functions
+bool gpu_backend_init(const char* window_name, struct GLFWwindow* window);
+void gpu_backend_shutdown();
+void resource_pools_init(arena* a, struct resource_pools* res_pools);
+
+bool gpu_device_create(gpu_device* out_device);
+void gpu_device_destroy();
+
+// --- Render Pipeline
+gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description);
+void gpu_pipeline_destroy(gpu_pipeline* pipeline);
+
+// --- Renderpass
+gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description);
+void gpu_renderpass_destroy(gpu_renderpass* pass);
+
+// --- Swapchain
+bool gpu_swapchain_create(gpu_swapchain* out_swapchain);
+void gpu_swapchain_destroy(gpu_swapchain* swapchain);
+
+// --- Command buffer
+gpu_cmd_encoder gpu_cmd_encoder_create();
+void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder);
+void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder);
+void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass);
+void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder);
+void gpu_cmd_encoder_begin_compute();
+gpu_cmd_encoder* gpu_get_default_cmd_encoder();
+
+/** @brief Finish recording and return a command buffer that can be submitted to a queue */
+gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder);
+
+void gpu_queue_submit(gpu_cmd_buffer* buffer);
+
+// --- Data copy commands
+/** @brief Copy data from one buffer to another */
+void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
+ buffer_handle dst, u64 dst_offset, u64 copy_size);
+/** @brief Upload CPU-side data as array of bytes to a GPU buffer */
+void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size);
+
+/** @brief Copy data from buffer to buffer using a one time submit command buffer and a wait */
+void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
+ u64 dst_offset, u64 copy_size);
+/** @brief Copy data from buffer to an image using a one time submit command buffer */
+void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst);
+
+// --- Render commands
+void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline);
+void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data);
+void encode_set_default_settings(gpu_cmd_encoder* encoder);
+void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
+void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
+void encode_set_bind_group(); // TODO
+void encode_draw(gpu_cmd_encoder* encoder, u64 count);
+void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count);
+void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
+
+// --- Buffers
+buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
+ const void* data);
+void gpu_buffer_destroy(buffer_handle buffer);
+void gpu_buffer_upload(const void* data);
+
+// Textures
+/** @brief Create a new GPU texture resource.
+ * @param create_view creates a texture view (with same dimensions) at the same time
+ * @param data if not NULL then the data stored at the pointer will be uploaded to the GPU texture
+ * @note automatically creates a sampler for you */
+texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data);
+void gpu_texture_destroy(texture_handle);
+void gpu_texture_upload(texture_handle texture, const void* data);
+
+// --- Vertex formats
+bytebuffer vertices_as_bytebuffer(arena* a, vertex_format format, vertex_darray* vertices);
+
+void vertex_desc_add(vertex_description* builder, const char* name, vertex_attrib_type type);
+
+// --- TEMP
+bool gpu_backend_begin_frame();
+void gpu_backend_end_frame();
+void gpu_temp_draw(size_t n_verts);
+
+// TODO: --- Compute
+
+// --- Helpers
+vertex_description static_3d_vertex_description();
+size_t vertex_attrib_size(vertex_attrib_type attr);
diff --git a/src/render/ral_types.h b/src/render/ral_types.h
new file mode 100644
index 0000000..5f21846
--- /dev/null
+++ b/src/render/ral_types.h
@@ -0,0 +1,257 @@
+/**
+ * @file ral_types.h
+ * @author your name (you@domain.com)
+ * @brief Struct and enum definitions for RAL
+ * @version 0.1
+ * @date 2024-04-27
+ *
+ * @copyright Copyright (c) 2024
+ *
+ */
+#pragma once
+
+#include "darray.h"
+#include "defines.h"
+#include "maths_types.h"
+
+#define MAX_VERTEX_ATTRIBUTES 16
+
+/* #ifndef RENDERER_TYPED_HANDLES */
+CORE_DEFINE_HANDLE(buffer_handle);
+CORE_DEFINE_HANDLE(texture_handle);
+CORE_DEFINE_HANDLE(sampler_handle);
+CORE_DEFINE_HANDLE(shader_handle);
+CORE_DEFINE_HANDLE(pipeline_layout_handle);
+CORE_DEFINE_HANDLE(pipeline_handle);
+CORE_DEFINE_HANDLE(renderpass_handle);
+#define ABSENT_MODEL_HANDLE 999999999
+
+// gpu types
+typedef enum gpu_primitive_topology {
+ CEL_PRIMITIVE_TOPOLOGY_POINT,
+ CEL_PRIMITIVE_TOPOLOGY_LINE,
+ CEL_PRIMITIVE_TOPOLOGY_LINE_STRIP,
+ CEL_PRIMITIVE_TOPOLOGY_TRIANGLE,
+ CEL_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ CEL_PRIMITIVE_TOPOLOGY_COUNT
+} cel_primitive_topology;
+
+typedef enum gpu_texture_type {
+ CEL_TEXTURE_TYPE_2D,
+ CEL_TEXTURE_TYPE_3D,
+ CEL_TEXTURE_TYPE_2D_ARRAY,
+ CEL_TEXTURE_TYPE_CUBE_MAP,
+ CEL_TEXTURE_TYPE_COUNT
+} gpu_texture_type;
+
+typedef enum gpu_texture_format {
+ CEL_TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM,
+ CEL_TEXTURE_FORMAT_DEPTH_DEFAULT,
+ CEL_TEXTURE_FORMAT_COUNT
+} gpu_texture_format;
+
+/** @brief Texture Description - used by texture creation functions */
+typedef struct texture_desc {
+ gpu_texture_type tex_type;
+ gpu_texture_format format;
+ u32x2 extents;
+} texture_desc;
+
+typedef enum gpu_buffer_type {
+ CEL_BUFFER_DEFAULT, // on Vulkan this would be a storage buffer?
+ CEL_BUFFER_VERTEX,
+ CEL_BUFFER_INDEX,
+ CEL_BUFFER_UNIFORM,
+ CEL_BUFFER_COUNT
+} gpu_buffer_type;
+
+static const char* buffer_type_names[] = {
+ "RAL Buffer Default", "RAL Buffer Vertex", "RAL Buffer Index",
+ "RAL Buffer Uniform", "RAL Buffer Count",
+};
+
+typedef enum gpu_buffer_flag {
+ CEL_BUFFER_FLAG_CPU = 1 << 0,
+ CEL_BUFFER_FLAG_GPU = 1 << 1,
+ CEL_BUFFER_FLAG_STORAGE = 1 << 2,
+ CEL_BUFFER_FLAG_COUNT
+} gpu_buffer_flag;
+typedef u32 gpu_buffer_flags;
+
+typedef enum vertex_format {
+ VERTEX_STATIC_3D,
+ VERTEX_SPRITE,
+ VERTEX_SKINNED,
+ VERTEX_COLOURED_STATIC_3D,
+ VERTEX_RAW_POS_COLOUR,
+ VERTEX_COUNT
+} vertex_format;
+
+typedef union vertex {
+ struct {
+ vec3 position;
+ vec3 normal;
+ vec2 tex_coords;
+ } static_3d; /** @brief standard vertex format for static geometry in 3D */
+
+ struct {
+ vec2 position;
+ vec4 colour;
+ vec2 tex_coords;
+ } sprite; /** @brief vertex format for 2D sprites or quads */
+
+ struct {
+ vec3 position;
+ vec4 colour;
+ vec2 tex_coords;
+ vec3 normal;
+ vec4i bone_ids; // Integer vector for bone IDs
+ vec4 bone_weights; // Weight of each bone's influence
+ } skinned_3d; /** @brief vertex format for skeletal (animated) geometry in 3D */
+
+ struct {
+ vec3 position;
+ vec2 tex_coords;
+ vec3 normal;
+ vec4 colour;
+ } coloured_static_3d; /** @brief vertex format used for debugging */
+
+ struct {
+ vec2 position;
+ vec3 colour;
+ } raw_pos_colour;
+} vertex;
+
+#ifndef TYPED_VERTEX_ARRAY
+KITC_DECL_TYPED_ARRAY(vertex)
+KITC_DECL_TYPED_ARRAY(u32)
+#define TYPED_VERTEX_ARRAY
+#endif
+
+// TEMP
+typedef struct custom_vertex {
+ vec2 pos;
+ vec3 color;
+} custom_vertex;
+
+// Vertex attributes
+/// @strip_prefix(ATTR_)
+typedef enum vertex_attrib_type {
+ ATTR_F32,
+ ATTR_F32x2,
+ ATTR_F32x3,
+ ATTR_F32x4,
+ ATTR_U32,
+ ATTR_U32x2,
+ ATTR_U32x3,
+ ATTR_U32x4,
+ ATTR_I32,
+ ATTR_I32x2,
+ ATTR_I32x3,
+ ATTR_I32x4,
+} vertex_attrib_type;
+
+typedef struct vertex_description {
+ char* debug_label;
+ const char* attr_names[MAX_VERTEX_ATTRIBUTES];
+ vertex_attrib_type attributes[MAX_VERTEX_ATTRIBUTES];
+ u32 attributes_count;
+ size_t stride;
+ bool use_full_vertex_size;
+} vertex_description;
+
+// --- Shaders & Bindings
+
+typedef enum shader_visibility {
+ VISIBILITY_VERTEX = 1 << 0,
+ VISIBILITY_FRAGMENT = 1 << 1,
+ VISIBILITY_COMPUTE = 1 << 2,
+} shader_visibility;
+
+/** @brief Describes the kind of binding a `shader_binding` is for. This changes how we create
+ * backing data for it. */
+typedef enum shader_binding_type {
+ /**
+ * @brief Binds a buffer to a shader
+ * @note Vulkan: Becomes a Storage Buffer
+ */
+ SHADER_BINDING_BUFFER,
+ SHADER_BINDING_BUFFER_ARRAY,
+ SHADER_BINDING_TEXTURE,
+ SHADER_BINDING_TEXTURE_ARRAY,
+ SHADER_BINDING_SAMPLER,
+ /**
+ * @brief Binds raw data to a shader
+ * @note Vulkan: Becomes a Uniform Buffer
+ */
+ SHADER_BINDING_BYTES,
+ // TODO: Acceleration Structure
+ SHADER_BINDING_COUNT
+} shader_binding_type;
+
+static const char* shader_binding_type_name[] = { "BUFFER", "BUFFER ARRAY", "TEXTURE",
+ "TEXTURE ARRAY", "SAMPLER", "BYTES",
+ "COUNT" };
+
+// pub trait ShaderBindable: Clone + Copy {
+// fn bind_to(&self, context: &mut PipelineContext, index: u32);
+// }
+
+typedef struct shader_binding {
+ const char* label;
+ shader_binding_type type;
+ shader_visibility vis;
+ bool stores_data; /** @brief if this is true then the shader binding has references to live data,
+                               if false then it's just being used to describe a layout and .data
+ should be zeroed */
+ union {
+ struct {
+ buffer_handle handle;
+ } buffer;
+ struct {
+ void* data;
+ size_t size;
+ } bytes;
+ struct {
+ texture_handle handle;
+ } texture;
+ } data; /** @brief can store any kind of data that we can bind to a shader / descriptor set */
+} shader_binding;
+
+#define MAX_LAYOUT_BINDINGS 8
+
+void print_shader_binding(shader_binding b);
+
+/** @brief A list of bindings that describe what data a shader / pipeline expects
+ @note This roughly correlates to a descriptor set layout in Vulkan
+*/
+typedef struct shader_data_layout {
+ char* name;
+ shader_binding bindings[MAX_LAYOUT_BINDINGS];
+ u32 bindings_count;
+} shader_data_layout;
+
+typedef struct shader_data {
+ shader_data_layout (*shader_data_get_layout)(void* data);
+ void* data;
+} shader_data;
+
+/*
+  Usage:
+  1. When we create the pipeline, we call the layout function with NULL data so it returns a
+     layout without any .data fields filled in.
+  2. When binding at draw time, we call the same function again with a pointer to live data so
+     each binding carries the data to upload.
+*/
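+
+/* A short sketch of that flow, using names from elsewhere in this repo (`enc` and
+   `shader_bind_data` are assumed to exist):
+     shader_data d = { .data = NULL, .shader_data_get_layout = &mvp_uniforms_layout };  // 1. pipeline creation
+     d.data = &shader_bind_data;                                                        // 2. draw time
+     encode_bind_shader_data(enc, 0, &d);
+*/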
+
+typedef enum gpu_cull_mode { CULL_BACK_FACE, CULL_FRONT_FACE, CULL_COUNT } gpu_cull_mode;
+
+// ? How to tie together materials and shaders
+
+// Three registers
+// 1. low level graphics api calls "ral"
+// 2. higher level render calls
+// 3. simplified immediate mode API
+
+// 3 - you don't need to know how the renderer works at all
+// 2 - you need to know how the overall renderer is designed
+// 1 - you need to understand graphics API specifics
diff --git a/src/render/render.c b/src/render/render.c
new file mode 100644
index 0000000..f52e2be
--- /dev/null
+++ b/src/render/render.c
@@ -0,0 +1,287 @@
+#include <glfw3.h>
+#include "maths_types.h"
+#include "render_types.h"
+#define STB_IMAGE_IMPLEMENTATION
+#include <stb_image.h>
+
+#include "camera.h"
+#include "file.h"
+#include "log.h"
+#include "mem.h"
+#include "ral.h"
+#include "ral_types.h"
+#include "render.h"
+
+//---NEW
+#include "static_pipeline.h"
+//---END
+
+/** @brief Creates the pipelines built into Celeritas such as rendering static opaque geometry,
+ debug visualisations, immediate mode UI, etc */
+void default_pipelines_init(renderer* ren);
+
+bool renderer_init(renderer* ren) {
+ // INFO("Renderer init");
+
+  // NOTE: all platforms use GLFW at the moment but that's subject to change
+ glfwInit();
+
+#if defined(CEL_REND_BACKEND_OPENGL)
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
+#elif defined(CEL_REND_BACKEND_VULKAN)
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+#endif
+
+ // glfw window creation
+ GLFWwindow* window = glfwCreateWindow(ren->config.scr_width, ren->config.scr_height,
+ ren->config.window_name, NULL, NULL);
+ if (window == NULL) {
+ // ERROR("Failed to create GLFW window\n");
+ glfwTerminate();
+ return false;
+ }
+ ren->window = window;
+
+ glfwMakeContextCurrent(ren->window);
+
+ DEBUG("Start gpu backend init");
+
+ if (!gpu_backend_init("Celeritas Engine - Vulkan", window)) {
+    FATAL("Couldn't load graphics API backend");
+ return false;
+ }
+ gpu_device_create(&ren->device); // TODO: handle errors
+ gpu_swapchain_create(&ren->swapchain);
+
+ DEBUG("Initialise GPU resource pools");
+ arena pool_arena = arena_create(malloc(1024 * 1024), 1024 * 1024);
+ ren->resource_pools = arena_alloc(&pool_arena, sizeof(struct resource_pools));
+ resource_pools_init(&pool_arena, ren->resource_pools);
+
+ // Create default rendering pipeline
+ default_pipelines_init(ren);
+
+ return true;
+}
+void renderer_shutdown(renderer* ren) {
+ gpu_swapchain_destroy(&ren->swapchain);
+ gpu_pipeline_destroy(&ren->static_opaque_pipeline);
+ gpu_backend_shutdown();
+}
+
+void default_pipelines_init(renderer* ren) {
+ // Static opaque geometry
+ arena scratch = arena_create(malloc(1024 * 1024), 1024 * 1024);
+
+  gpu_renderpass_desc pass_description = { 0 };
+ gpu_renderpass* renderpass = gpu_renderpass_create(&pass_description);
+
+ ren->default_renderpass = *renderpass;
+
+ printf("Load shaders\n");
+ str8 vert_path, frag_path;
+#ifdef CEL_REND_BACKEND_OPENGL
+ vert_path = str8lit("assets/shaders/cube.vert");
+ frag_path = str8lit("assets/shaders/cube.frag");
+#else
+ vert_path = str8lit("build/linux/x86_64/debug/cube.vert.spv");
+ frag_path = str8lit("build/linux/x86_64/debug/cube.frag.spv");
+#endif
+ str8_opt vertex_shader = str8_from_file(&scratch, vert_path);
+ str8_opt fragment_shader = str8_from_file(&scratch, frag_path);
+  if (!vertex_shader.has_value || !fragment_shader.has_value) {
+    ERROR_EXIT("Failed to load shaders from disk");
+  }
+
+ // Vertex attributes
+ vertex_description vertex_input = { 0 };
+ vertex_input.debug_label = "Standard Static 3D Vertex Format";
+ vertex_desc_add(&vertex_input, "inPosition", ATTR_F32x3);
+ vertex_desc_add(&vertex_input, "inNormal", ATTR_F32x3);
+ vertex_desc_add(&vertex_input, "inTexCoords", ATTR_F32x2);
+ vertex_input.use_full_vertex_size = true;
+
+ // Shader data bindings
+ shader_data mvp_uniforms_data = { .data = NULL, .shader_data_get_layout = &mvp_uniforms_layout };
+
+ struct graphics_pipeline_desc pipeline_description = {
+ .debug_name = "Basic Pipeline",
+ .vertex_desc = vertex_input,
+ .data_layouts = { mvp_uniforms_data },
+ .data_layouts_count = 1,
+ .vs = { .debug_name = "Basic Vertex Shader",
+ .filepath = vert_path,
+ .code = vertex_shader.contents,
+ .is_spirv = true },
+ .fs = { .debug_name = "Basic Fragment Shader",
+ .filepath = frag_path,
+ .code = fragment_shader.contents,
+ .is_spirv = true },
+ .renderpass = renderpass,
+ .wireframe = false,
+ .depth_test = false
+ };
+ gpu_pipeline* gfx_pipeline = gpu_graphics_pipeline_create(pipeline_description);
+ ren->static_opaque_pipeline = *gfx_pipeline;
+}
+
+void render_frame_begin(renderer* ren) {
+ ren->frame_aborted = false;
+ if (!gpu_backend_begin_frame()) {
+ ren->frame_aborted = true;
+ WARN("Frame aborted");
+ return;
+ }
+ gpu_cmd_encoder* enc = gpu_get_default_cmd_encoder();
+ // begin recording
+ gpu_cmd_encoder_begin(*enc);
+ gpu_cmd_encoder_begin_render(enc, &ren->default_renderpass);
+ encode_bind_pipeline(enc, PIPELINE_GRAPHICS, &ren->static_opaque_pipeline);
+ encode_set_default_settings(enc);
+}
+void render_frame_end(renderer* ren) {
+ if (ren->frame_aborted) {
+ return;
+ }
+ gpu_cmd_encoder* enc = gpu_get_default_cmd_encoder();
+ gpu_cmd_encoder_end_render(enc);
+ gpu_cmd_buffer buf = gpu_cmd_encoder_finish(enc);
+ gpu_queue_submit(&buf);
+ gpu_backend_end_frame();
+}
+void render_frame_draw(renderer* ren) {}
+
+bool mesh_has_indices(mesh* m) { return m->geometry->has_indices; }
+
+/**
+ * @param cam Camera used for getting the view-projection matrix to draw the mesh with.
+ *            If NULL, the last used camera is used. */
+void draw_mesh(mesh* mesh, mat4* model, camera* cam) { // , mat4* view, mat4* proj) {
+ gpu_cmd_encoder* enc = gpu_get_default_cmd_encoder();
+
+ encode_set_vertex_buffer(enc, mesh->vertex_buffer);
+ if (mesh_has_indices(mesh)) {
+ encode_set_index_buffer(enc, mesh->index_buffer);
+ }
+
+ mat4 view, proj;
+ if (cam) {
+ camera_view_projection(cam, // FIXME: proper swapchain dimensions
+ 1000, 1000, &view, &proj);
+
+ } else {
+ WARN("No camera set");
+ }
+ mvp_uniforms mvp_data = { .model = *model, .view = view, .projection = proj };
+ my_shader_bind_group shader_bind_data = { .mvp = mvp_data };
+ shader_data mvp_uniforms_data = { .data = &shader_bind_data,
+ .shader_data_get_layout = &mvp_uniforms_layout };
+ encode_bind_shader_data(enc, 0, &mvp_uniforms_data);
+
+ encode_draw_indexed(enc, mesh->geometry->indices->len);
+}
+
+void gfx_backend_draw_frame(renderer* ren, camera* camera, mat4 model, texture* tex) {}
+
+void geo_set_vertex_colours(geometry_data* geo, vec4 colour) {}
+
+// --- NEW
+
+mesh mesh_create(geometry_data* geometry, bool free_on_upload) {
+ mesh m = { 0 };
+
+ // Create and upload vertex buffer
+ size_t vert_bytes = geometry->vertices->len * sizeof(vertex);
+ INFO("Creating vertex buffer with size %d (%d x %d)", vert_bytes, geometry->vertices->len,
+ sizeof(vertex));
+ m.vertex_buffer = gpu_buffer_create(vert_bytes, CEL_BUFFER_VERTEX, CEL_BUFFER_FLAG_GPU,
+ geometry->vertices->data);
+
+ // Create and upload index buffer
+ size_t index_bytes = geometry->indices->len * sizeof(u32);
+ INFO("Creating index buffer with size %d (len: %d)", index_bytes, geometry->indices->len);
+ m.index_buffer = gpu_buffer_create(index_bytes, CEL_BUFFER_INDEX, CEL_BUFFER_FLAG_GPU,
+ geometry->indices->data);
+
+ m.is_uploaded = true;
+ // m.has_indices = geometry->has_indices;
+ // m.index_count = geometry->indices.len;
+ m.geometry = geometry;
+ if (free_on_upload) {
+ geo_free_data(geometry);
+ }
+
+ // TODO: materials?
+
+ return m;
+}
+
+// --- Textures
+
+texture_data texture_data_load(const char* path, bool invert_y) {
+ TRACE("Load texture %s", path);
+
+ // load the file data
+ int width, height, num_channels;
+ stbi_set_flip_vertically_on_load(invert_y);
+
+#pragma GCC diagnostic ignored "-Wpointer-sign"
+ char* data = stbi_load(path, &width, &height, &num_channels, STBI_rgb_alpha);
+ if (data) {
+ DEBUG("loaded texture: %s", path);
+ } else {
+ WARN("failed to load texture");
+ }
+
+ unsigned int channel_type;
+ if (num_channels == 4) {
+ channel_type = GL_RGBA;
+ } else {
+ channel_type = GL_RGB;
+ }
+ texture_desc desc = { .extents = { width, height },
+ .format = CEL_TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM,
+ .tex_type = CEL_TEXTURE_TYPE_2D };
+
+ return (texture_data){ .description = desc, .image_data = data };
+}
+
+texture_handle texture_data_upload(texture_data data, bool free_on_upload) {
+ texture_handle handle = gpu_texture_create(data.description, true, data.image_data);
+ if (free_on_upload) {
+ TRACE("Freed stb_image data");
+ stbi_image_free(data.image_data);
+ }
+ return handle;
+}
+
+/** @brief Load all of the textures for a PBR material and return an unnamed material */
+material pbr_material_load(char* albedo_path, char* normal_path, bool metal_roughness_combined,
+ char* metallic_path, char* roughness_map, char* ao_map) {
+ material m = { 0 };
+ m.kind = MAT_PBR;
+
+ // For now we must have the required textures
+ assert(albedo_path);
+ assert(normal_path);
+ assert(metallic_path);
+ assert(metal_roughness_combined);
+
+ m.mat_data.pbr.metal_roughness_combined = metal_roughness_combined;
+ texture_data tex_data;
+ tex_data = texture_data_load(albedo_path, false);
+ m.mat_data.pbr.albedo_map = texture_data_upload(tex_data, true);
+ tex_data = texture_data_load(normal_path, false);
+ m.mat_data.pbr.normal_map = texture_data_upload(tex_data, true);
+ tex_data = texture_data_load(metallic_path, false);
+ m.mat_data.pbr.metallic_map = texture_data_upload(tex_data, true);
+
+ return m;
+}
diff --git a/src/render/render.h b/src/render/render.h
new file mode 100644
index 0000000..19a8d1a
--- /dev/null
+++ b/src/render/render.h
@@ -0,0 +1,96 @@
+/**
+ * @file render.h
+ * @author your name (you@domain.com)
+ * @brief Renderer frontend
+ * @version 0.1
+ * @date 2024-03-21
+ *
+ * @copyright Copyright (c) 2024
+ *
+ */
+#pragma once
+
+#include "file.h"
+#include "ral_types.h"
+#include "render_types.h"
+
+/** @brief configuration passed to the renderer at init time */
+typedef struct renderer_config {
+ char window_name[256];
+ u32 scr_width, scr_height;
+ vec3 clear_colour; /** colour that the screen gets cleared to every frame */
+} renderer_config;
+
+typedef struct renderer {
+ struct GLFWwindow* window;
+ void* backend_context;
+ renderer_config config;
+ gpu_device device;
+ gpu_swapchain swapchain;
+ gpu_renderpass default_renderpass;
+ gpu_pipeline static_opaque_pipeline;
+ bool frame_aborted;
+ struct resource_pools* resource_pools;
+} renderer;
+
+bool renderer_init(renderer* ren);
+void renderer_shutdown(renderer* ren);
+
+void render_frame_begin(renderer* ren);
+void render_frame_update_globals(renderer* ren);
+void render_frame_end(renderer* ren);
+void render_frame_draw(renderer* ren);
+
+// ! TEMP
+typedef struct camera camera;
+void gfx_backend_draw_frame(renderer* ren, camera* camera, mat4 model, texture* tex);
+
+typedef struct render_ctx {
+ mat4 view;
+ mat4 projection;
+} render_ctx;
+
+// frontend -- these can be called from say a loop in an example, or via FFI
+texture_handle texture_create(const char* debug_name, texture_desc description, const u8* data);
+
+// Frontend Resources
+texture_data texture_data_load(const char* path, bool invert_y);
+
+/**
+ * @brief
+ *
+ * @param data
+ * @param free_on_upload frees the CPU-side pixel data stored in `data`
+ * @return texture_handle
+ */
+texture_handle texture_data_upload(texture_data data, bool free_on_upload);
+
+/** @brief Load all of the textures for a PBR material and return an unnamed material */
+material pbr_material_load(char* albedo_path, char* normal_path, bool metal_roughness_combined,
+ char* metallic_path, char* roughness_map, char* ao_map);
+
+buffer_handle buffer_create(const char* debug_name, u64 size);
+bool buffer_destroy(buffer_handle buffer);
+sampler_handle sampler_create();
+
+// models and meshes are implemented **in terms of the above**
+
+/**
+ * @brief Creates buffers and returns a struct that holds handles to our resources
+ *
+ * @param geometry
+ * @param free_on_upload frees the CPU-side vertex/index data stored in `geometry` when we
+ successfully upload that data to the GPU-side buffer
+ * @return mesh
+ */
+mesh mesh_create(geometry_data* geometry, bool free_on_upload);
+void mesh_delete(mesh* mesh); // TODO
+
+void draw_mesh(mesh* mesh, mat4* model, camera* cam);
+
+model_handle model_load(const char* debug_name, const char* filepath);
+
+void geo_free_data(geometry_data* geo);
+void geo_set_vertex_colours(geometry_data* geo, vec4 colour);
+
+vertex_description static_3d_vertex_description();
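+
+/* Example frame flow (sketch only; `ren`, `cam`, `geo` and `model_mat` are assumed to exist):
+     mesh m = mesh_create(&geo, true);
+     render_frame_begin(&ren);
+     draw_mesh(&m, &model_mat, &cam);
+     render_frame_end(&ren);
+*/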
diff --git a/src/render/render_types.h b/src/render/render_types.h
new file mode 100644
index 0000000..b25fa14
--- /dev/null
+++ b/src/render/render_types.h
@@ -0,0 +1,181 @@
+/**
+ * @file render_types.h
+ * @author your name (you@domain.com)
+ * @brief
+ * @version 0.1
+ * @date 2024-04-27
+ *
+ * @copyright Copyright (c) 2024
+ *
+ */
+#pragma once
+
+#include "colours.h"
+#include "defines.h"
+#include "ral.h"
+#include "ral_types.h"
+#if defined(CEL_PLATFORM_WINDOWS)
+// #include "backend_dx11.h"
+#endif
+#if defined(CEL_REND_BACKEND_VULKAN)
+#include "backend_vulkan.h"
+#elif defined(CEL_REND_BACKEND_METAL)
+#include "backend_metal.h"
+#elif defined(CEL_REND_BACKEND_OPENGL)
+#include "backend_opengl.h"
+#endif
+
+struct GLFWwindow;
+
+typedef struct geometry_data {
+ vertex_format format;
+ vertex_darray* vertices; // TODO: make it not a pointer
+ bool has_indices;
+ u32_darray* indices;
+ rgba colour; /** Optional: set vertex colours */
+} geometry_data;
+
+typedef struct u32_opt {
+ u32 value;
+ bool has_value;
+} u32_opt;
+
+// 'Upload' a geometry_data (to GPU) -> get back a mesh
+typedef struct mesh {
+ buffer_handle vertex_buffer;
+ buffer_handle index_buffer;
+ geometry_data* geometry; // NULL means it has been freed
+ u32_opt material_index;
+ bool is_uploaded;
+ bool is_latent;
+} mesh;
+
+#ifndef TYPED_MESH_ARRAY
+KITC_DECL_TYPED_ARRAY(mesh)
+#define TYPED_MESH_ARRAY
+#endif
+
+/* Hot reloading:
+C side - reload_model():
+ - load model from disk using existing loader
+ - remove from transform graph so we don't try to draw it
+*/
+
+typedef struct texture {
+} texture;
+
+typedef struct texture_data {
+ texture_desc description;
+ void* image_data;
+} texture_data;
+
+typedef enum material_kind {
+ MAT_BLINN_PHONG,
+ MAT_PBR,
+ MAT_PBR_PARAMS, // uses float values to represent a surface uniformly
+ MAT_COUNT
+} material_kind;
+static const char* material_kind_names[] = { "Blinn Phong", "PBR (Textures)", "PBR (Params)",
+ "Count (This should be an error)" };
+
+typedef struct blinn_phong_material {
+ char name[256];
+ texture diffuse_texture;
+ char diffuse_tex_path[256];
+ texture specular_texture;
+ char specular_tex_path[256];
+ vec3 ambient_colour;
+ vec3 diffuse;
+ vec3 specular;
+ f32 spec_exponent;
+ bool is_loaded;
+ bool is_uploaded;
+} blinn_phong_material;
+// typedef blinn_phong_material material;
+
+typedef struct pbr_parameters {
+ vec3 albedo;
+ f32 metallic;
+ f32 roughness;
+ f32 ao;
+} pbr_parameters;
+
+typedef struct pbr_material {
+ texture_handle albedo_map;
+ texture_handle normal_map;
+ bool metal_roughness_combined;
+ texture_handle metallic_map;
+ texture_handle roughness_map;
+ texture_handle ao_map;
+} pbr_material;
+
+typedef struct material {
+ material_kind kind;
+ union {
+ blinn_phong_material blinn_phong;
+ pbr_parameters pbr_params;
+ pbr_material pbr;
+ } mat_data;
+ char* name;
+} material;
+
+#ifndef TYPED_MATERIAL_ARRAY
+KITC_DECL_TYPED_ARRAY(material)
+#define TYPED_MATERIAL_ARRAY
+#endif
+
+CORE_DEFINE_HANDLE(model_handle);
+
+typedef struct model {
+ str8 name;
+ mesh_darray* meshes;
+ material_darray* materials;
+} model;
+
+TYPED_POOL(model, model)
+
+// FIXME: the default blinn-phong material. MUST be initialised with the function below
+// FIXME: extern material DEFAULT_MATERIAL;
+void default_material_init();
+
+#ifndef TYPED_MODEL_ARRAY
+KITC_DECL_TYPED_ARRAY(model)
+#define TYPED_MODEL_ARRAY
+#endif
+
+#ifndef TYPED_ANIMATION_CLIP_ARRAY
+#include "animation.h"
+KITC_DECL_TYPED_ARRAY(animation_clip)
+#define TYPED_ANIMATION_CLIP_ARRAY
+#endif
+
+/** @brief Describes all the data required for the renderer to start executing draws */
+typedef struct render_entity {
+ /* buffer_handle index_buffer; */
+ /* u32 index_count; */
+ /* u32 index_offset; */
+ /* buffer_handle vertex_buffer; */
+ model_handle model;
+ transform tf;
+} render_entity;
+
+#ifndef TYPED_RENDER_ENTITY_ARRAY
+KITC_DECL_TYPED_ARRAY(render_entity)
+#define TYPED_RENDER_ENTITY_ARRAY
+#endif
+
+// --- Lights
+typedef struct point_light {
+ vec3 position;
+ f32 constant, linear, quadratic;
+ vec3 ambient;
+ vec3 diffuse;
+ vec3 specular;
+} point_light;
+
+typedef struct directional_light {
+ vec3 direction;
+ vec3 ambient;
+ vec3 diffuse;
+ vec3 specular;
+} directional_light;
diff --git a/src/render/renderpasses.c b/src/render/renderpasses.c
new file mode 100644
index 0000000..b93d487
--- /dev/null
+++ b/src/render/renderpasses.c
@@ -0,0 +1,140 @@
+/**
+ * @file renderpasses.c
+ * @author your name (you@domain.com)
+ * @brief
+ * @version 0.1
+ * @date 2024-06-22
+ *
+ * @copyright Copyright (c) 2024
+ *
+ */
+
+#include "renderpasses.h"
+#include "file.h"
+#include "log.h"
+#include "maths_types.h"
+#include "ral.h"
+#include "ral_types.h"
+
+#define SHADOW_WIDTH 1000
+#define SHADOW_HEIGHT 1000
+
+shader_data_layout debug_quad_layout(void* data) {
+ debug_quad_uniform* d = data;
+ bool has_data = data != NULL;
+
+ shader_binding b1 = { .label = "depthMap",
+ .type = SHADER_BINDING_TEXTURE,
+ .stores_data = has_data };
+ if (has_data) {
+ b1.data.texture.handle = d->depthMap;
+ }
+  return (shader_data_layout){ .name = "debug quad uniforms",
+                               .bindings = { b1 },
+                               .bindings_count = 1 };
+}
+
+gpu_pipeline* debug_quad_pipeline_create() {
+ gpu_renderpass_desc rpass_desc = { .default_framebuffer = true };
+ gpu_renderpass* rpass = gpu_renderpass_create(&rpass_desc);
+ shader_data shader_layout = { .data = NULL, .shader_data_get_layout = debug_quad_layout };
+ struct graphics_pipeline_desc desc = { .debug_name = "Shadow maps debug quad",
+ .vertex_desc = static_3d_vertex_description(),
+ .data_layouts = { shader_layout },
+ .data_layouts_count = 1,
+ .vs = shader_quick_load("assets/shaders/debug_quad.vert"),
+ .fs = shader_quick_load("assets/shaders/debug_quad.frag"),
+ .renderpass = rpass,
+ .wireframe = false };
+
+ return gpu_graphics_pipeline_create(desc);
+}
+
+void ren_shadowmaps_init(ren_shadowmaps* storage) {
+ storage->rpass = shadowmaps_renderpass_create();
+ storage->static_pipeline = shadowmaps_pipeline_create(storage->rpass);
+ storage->debug_quad = debug_quad_pipeline_create();
+ storage->depth_tex = storage->rpass->description.depth_stencil;
+}
+
+gpu_renderpass* shadowmaps_renderpass_create() {
+ // Create depthmap texture
+ u32x2 extents = u32x2(SHADOW_WIDTH, SHADOW_HEIGHT);
+ texture_desc depthmap_desc = { .extents = extents,
+ .format = CEL_TEXTURE_FORMAT_DEPTH_DEFAULT,
+ .tex_type = CEL_TEXTURE_TYPE_2D };
+ texture_handle depthmap = gpu_texture_create(depthmap_desc, false, NULL);
+
+ gpu_renderpass_desc shadows_desc = { .default_framebuffer = false,
+ .has_color_target = false,
+ .has_depth_stencil = true,
+ .depth_stencil = depthmap };
+ return gpu_renderpass_create(&shadows_desc);
+}
+
+// == shader bindings
+
+shader_data_layout model_uniform_layout(void* data) {
+ bool has_data = data != NULL;
+
+ shader_binding b1 = { .label = "Model",
+ .type = SHADER_BINDING_BYTES,
+ .stores_data = has_data,
+ .data = { .bytes.size = sizeof(model_uniform) } };
+ if (has_data) {
+ b1.data.bytes.data = data;
+ }
+ return (shader_data_layout){ .name = "model_uniform", .bindings = { b1 }, .bindings_count = 1 };
+}
+shader_data_layout lightspace_uniform_layout(void* data) {
+ bool has_data = data != NULL;
+
+ shader_binding b1 = { .label = "LightSpace",
+ .type = SHADER_BINDING_BYTES,
+ .stores_data = has_data,
+ .data = { .bytes.size = sizeof(lightspace_tf_uniform) } };
+ if (has_data) {
+ b1.data.bytes.data = data;
+ }
+ return (shader_data_layout){ .name = "lightspace_tf_uniform",
+ .bindings = { b1 },
+ .bindings_count = 1 };
+}
+
+// ==================
+
+gpu_pipeline* shadowmaps_pipeline_create(gpu_renderpass* rpass) {
+ arena scratch = arena_create(malloc(1024 * 1024), 1024 * 1024);
+
+ str8 vert_path = str8lit("assets/shaders/shadows.vert");
+ str8 frag_path = str8lit("assets/shaders/shadows.frag");
+ str8_opt vertex_shader = str8_from_file(&scratch, vert_path);
+ str8_opt fragment_shader = str8_from_file(&scratch, frag_path);
+ if (!vertex_shader.has_value || !fragment_shader.has_value) {
+ ERROR_EXIT("Failed to load shaders from disk");
+ }
+
+  // We'll have two data layouts: 1. the model matrix, and 2. the light-space transform
+ shader_data model_uniform = { .data = NULL, .shader_data_get_layout = &model_uniform_layout };
+ shader_data lightspace_uniform = { .data = NULL,
+ .shader_data_get_layout = &lightspace_uniform_layout };
+
+ struct graphics_pipeline_desc desc = { .debug_name = "Shadowmap drawing pipeline",
+ .vertex_desc = static_3d_vertex_description(),
+ .data_layouts = { model_uniform, lightspace_uniform },
+ .data_layouts_count = 2,
+ .vs = { .debug_name = "Shadows Vert shader",
+ .filepath = vert_path,
+ .code = vertex_shader.contents,
+ .is_spirv = true },
+ .fs = { .debug_name = "Shadows Frag shader",
+ .filepath = frag_path,
+ .code = fragment_shader.contents,
+ .is_spirv = true },
+ .renderpass = rpass };
+
+  // Create the pipeline before freeing the scratch arena that backs the shader code
+  gpu_pipeline* pipeline = gpu_graphics_pipeline_create(desc);
+  arena_free_storage(&scratch);
+  return pipeline;
+}
+
+void renderpass_shadowmap_execute(gpu_renderpass* pass, render_entity* entities,
+ size_t entity_count) {}
diff --git a/src/render/renderpasses.h b/src/render/renderpasses.h
new file mode 100644
index 0000000..5a5ffee
--- /dev/null
+++ b/src/render/renderpasses.h
@@ -0,0 +1,56 @@
+/**
+ * @file renderpasses.h
+ * @author your name (you@domain.com)
+ * @brief Built-in renderpasses to the engine
+ * @version 0.1
+ * @date 2024-04-28
+ *
+ * @copyright Copyright (c) 2024
+ *
+ */
+#pragma once
+#include "ral.h"
+#include "ral_types.h"
+#include "render_types.h"
+
+// Shadowmap pass
+// Blinn-phong pass
+// Unlit pass
+// Debug visualisations pass
+
+// Don't need to pass in *anything*.
+gpu_renderpass* renderpass_blinn_phong_create();
+void renderpass_blinn_phong_execute(gpu_renderpass* pass, render_entity* entities,
+ size_t entity_count);
+
+
+typedef struct ren_shadowmaps {
+ u32 width;
+ u32 height;
+ gpu_renderpass* rpass;
+ gpu_pipeline* static_pipeline;
+ gpu_pipeline* debug_quad;
+ texture_handle depth_tex;
+} ren_shadowmaps;
+
+typedef struct model_uniform {
+ mat4 model;
+} model_uniform;
+typedef struct lightspace_tf_uniform {
+ mat4 lightSpaceMatrix;
+} lightspace_tf_uniform;
+
+typedef struct debug_quad_uniform {
+ texture_handle depthMap;
+} debug_quad_uniform;
+
+shader_data_layout model_uniform_layout(void* data);
+shader_data_layout lightspace_uniform_layout(void* data);
+shader_data_layout debug_quad_layout(void* data);
+
+void ren_shadowmaps_init(ren_shadowmaps* storage);
+
+gpu_renderpass* shadowmaps_renderpass_create();
+gpu_pipeline* shadowmaps_pipeline_create(gpu_renderpass* rpass);
+
+void renderpass_shadowmap_execute(gpu_renderpass* pass, render_entity* entities, size_t entity_count);
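+
+// Usage sketch (assumes the GPU backend has already been initialised):
+//   ren_shadowmaps shadows = { 0 };
+//   ren_shadowmaps_init(&shadows);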
diff --git a/src/render/static_pipeline.h b/src/render/static_pipeline.h
new file mode 100644
index 0000000..bf5bc42
--- /dev/null
+++ b/src/render/static_pipeline.h
@@ -0,0 +1,30 @@
+#pragma once
+#include "defines.h"
+#include "maths_types.h"
+#include "ral.h"
+#include "ral_types.h"
+#include "render_types.h"
+
+typedef struct mvp_uniforms {
+ mat4 model;
+ mat4 view;
+ mat4 projection;
+} mvp_uniforms;
+typedef struct my_shader_bind_group {
+ mvp_uniforms mvp;
+} my_shader_bind_group;
+
+static shader_data_layout mvp_uniforms_layout(void* data) {
+ my_shader_bind_group* d = (my_shader_bind_group*)data;
+ bool has_data = data != NULL;
+
+ shader_binding b1 = { .label = "Matrices",
+ .type = SHADER_BINDING_BYTES,
+ .stores_data = has_data,
+ .data = { .bytes = { .size = sizeof(mvp_uniforms) } } };
+
+ if (has_data) {
+ b1.data.bytes.data = &d->mvp;
+ }
+ return (shader_data_layout){ .name = "global_ubo", .bindings = { b1 }, .bindings_count = 1 };
+}