Diffstat (limited to 'archive/src/render')
 archive/src/render/archive/backends/backend_test.c          |    1
 archive/src/render/archive/backends/metal/README.md         |    1
 archive/src/render/archive/backends/metal/backend_metal.h   |   74
 archive/src/render/archive/backends/metal/backend_metal.m   |  285
 archive/src/render/archive/backends/opengl/backend_opengl.c |  521
 archive/src/render/archive/backends/opengl/backend_opengl.h |   68
 archive/src/render/archive/backends/vulkan/README.md        |    1
 archive/src/render/archive/backends/vulkan/backend_vulkan.c | 1705
 archive/src/render/archive/backends/vulkan/backend_vulkan.h |  118
 archive/src/render/immdraw.c                                |  176
 archive/src/render/immdraw.h                                |   63
 archive/src/render/pbr.c                                    |  266
 archive/src/render/pbr.h                                    |   70
 archive/src/render/render.c                                 |  359
 archive/src/render/render.h                                 |  151
 archive/src/render/render_types.h                           |  138
 archive/src/render/shader_layouts.h                         |   70
 archive/src/render/shadows.c                                |  211
 archive/src/render/shadows.h                                |   48
 archive/src/render/skybox.c                                 |  161
 archive/src/render/skybox.h                                 |   41
 21 files changed, 4528 insertions(+), 0 deletions(-)
diff --git a/archive/src/render/archive/backends/backend_test.c b/archive/src/render/archive/backends/backend_test.c
new file mode 100644
index 0000000..6347e27
--- /dev/null
+++ b/archive/src/render/archive/backends/backend_test.c
@@ -0,0 +1 @@
+// #FUTURE
\ No newline at end of file
diff --git a/archive/src/render/archive/backends/metal/README.md b/archive/src/render/archive/backends/metal/README.md
new file mode 100644
index 0000000..f87f5c1
--- /dev/null
+++ b/archive/src/render/archive/backends/metal/README.md
@@ -0,0 +1 @@
+# TODO
\ No newline at end of file
diff --git a/archive/src/render/archive/backends/metal/backend_metal.h b/archive/src/render/archive/backends/metal/backend_metal.h
new file mode 100644
index 0000000..9561bb6
--- /dev/null
+++ b/archive/src/render/archive/backends/metal/backend_metal.h
@@ -0,0 +1,74 @@
+#pragma once
+// #define CEL_REND_BACKEND_METAL
+#if defined(CEL_REND_BACKEND_METAL)
+
+#include "defines.h"
+#include "maths_types.h"
+#ifdef __OBJC__
+#import <Foundation/Foundation.h>
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+#import <QuartzCore/CAMetalLayer.h>
+#else
+typedef void* id;
+#endif
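+// NOTE: when this header is compiled as plain C (no __OBJC__), the Metal
+// object-pointer types below collapse to void*, so struct layouts stay
+// identical across .c and .m translation units.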
+
+typedef struct gpu_swapchain {
+ u32x2 dimensions;
+#ifdef __OBJC__
+ CAMetalLayer* swapchain;
+#else
+ void* swapchain;
+#endif
+} gpu_swapchain;
+typedef struct gpu_device {
+/** @brief `device` gives us access to our GPU */
+#ifdef __OBJC__
+ id<MTLDevice> id;
+#else
+ void* id;
+#endif
+} gpu_device;
+typedef struct gpu_pipeline_layout {
+ void* pad;
+} gpu_pipeline_layout;
+typedef struct gpu_pipeline {
+#ifdef __OBJC__
+ id<MTLRenderPipelineState> pipeline_state;
+#else
+ void* pipeline_state;
+#endif
+} gpu_pipeline;
+typedef struct gpu_renderpass {
+#ifdef __OBJC__
+ MTLRenderPassDescriptor* rpass_descriptor;
+#else
+ void* rpass_descriptor;
+#endif
+} gpu_renderpass;
+typedef struct gpu_cmd_encoder {
+#ifdef __OBJC__
+ id<MTLCommandBuffer> cmd_buffer;
+ id<MTLRenderCommandEncoder> render_encoder;
+#else
+ void* cmd_buffer;
+ void* render_encoder;
+#endif
+} gpu_cmd_encoder;
+typedef struct gpu_cmd_buffer {
+ void* pad;
+} gpu_cmd_buffer;
+
+typedef struct gpu_buffer {
+#ifdef __OBJC__
+ id<MTLBuffer> id;
+#else
+ void* id;
+#endif
+ u64 size;
+} gpu_buffer;
+typedef struct gpu_texture {
+ void* pad;
+} gpu_texture;
+
+#endif
\ No newline at end of file
diff --git a/archive/src/render/archive/backends/metal/backend_metal.m b/archive/src/render/archive/backends/metal/backend_metal.m
new file mode 100644
index 0000000..4787755
--- /dev/null
+++ b/archive/src/render/archive/backends/metal/backend_metal.m
@@ -0,0 +1,285 @@
+#include <assert.h>
+// #define CEL_REND_BACKEND_METAL
+#if defined(CEL_REND_BACKEND_METAL)
+#include <stddef.h>
+#include "ral_types.h"
+#include "colours.h"
+#include <stdlib.h>
+#include "camera.h"
+#include "defines.h"
+#include "file.h"
+#include "log.h"
+#include "maths_types.h"
+#include "ral.h"
+
+#define GLFW_INCLUDE_NONE
+#define GLFW_EXPOSE_NATIVE_COCOA
+
+#include <GLFW/glfw3.h>
+#include <GLFW/glfw3native.h>
+
+#import <Foundation/Foundation.h>
+#import <Metal/Metal.h>
+#import <MetalKit/MetalKit.h>
+#import <QuartzCore/CAMetalLayer.h>
+#include "backend_metal.h"
+
+// --- Handy macros
+#define BUFFER_GET(h) (buffer_pool_get(&context.resource_pools->buffers, h))
+#define TEXTURE_GET(h) (texture_pool_get(&context.resource_pools->textures, h))
+
+typedef struct metal_context {
+ GLFWwindow* window;
+ NSWindow* metal_window;
+ arena pool_arena;
+
+ gpu_device* device;
+ gpu_swapchain* swapchain;
+ id<CAMetalDrawable> surface;
+
+ id<MTLCommandQueue> command_queue;
+ gpu_cmd_encoder main_command_buf;
+ gpu_backend_pools gpu_pools;
+ struct resource_pools* resource_pools;
+} metal_context;
+
+static metal_context context;
+
+struct GLFWwindow;
+
+bool gpu_backend_init(const char *window_name, struct GLFWwindow *window) {
+ INFO("loading Metal backend");
+
+ memset(&context, 0, sizeof(metal_context));
+ context.window = window;
+
+ size_t pool_buffer_size = 1024 * 1024;
+ context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size);
+
+ backend_pools_init(&context.pool_arena, &context.gpu_pools);
+ context.resource_pools = malloc(sizeof(struct resource_pools));
+ resource_pools_init(&context.pool_arena, context.resource_pools);
+
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+
+ glfwMakeContextCurrent(window);
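+  // NOTE: with GLFW_CLIENT_API set to GLFW_NO_API there is no GL context to
+  // make current, and window hints only affect windows created afterwards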
+ // FIXME: glfwSetFramebufferSizeCallback(ren->window, framebuffer_size_callback);
+
+ // get a NSWindow pointer from GLFWwindow
+ NSWindow *nswindow = glfwGetCocoaWindow(window);
+ context.metal_window = nswindow;
+
+ // const id<MTLCommandQueue> queue = [gpu newCommandQueue];
+ // CAMetalLayer *swapchain = [CAMetalLayer layer];
+ // swapchain.device = gpu;
+ // swapchain.opaque = YES;
+
+ // // set swapchain for the window
+ // nswindow.contentView.layer = swapchain;
+ // nswindow.contentView.wantsLayer = YES;
+
+ // MTLClearColor color = MTLClearColorMake(0.7, 0.1, 0.2, 1.0);
+
+ // // set all our state properties
+ // state->device = gpu;
+ // state->cmd_queue = queue;
+ // state->swapchain = swapchain;
+ // state->clear_color = color;
+
+ // NSError *err = 0x0; // TEMPORARY
+
+ // WARN("About to try loading metallib");
+ // id<MTLLibrary> defaultLibrary = [state->device newLibraryWithFile: @"build/gfx.metallib" error:&err];
+ // CASSERT(defaultLibrary);
+ // state->default_lib = defaultLibrary;
+ // if (!state->default_lib) {
+ // NSLog(@"Failed to load library");
+ // exit(0);
+ // }
+
+ // create_render_pipeline(state);
+
+ return true;
+}
+
+void gpu_backend_shutdown() {}
+
+bool gpu_device_create(gpu_device* out_device) {
+ TRACE("GPU Device creation");
+ const id<MTLDevice> gpu = MTLCreateSystemDefaultDevice();
+ out_device->id = gpu;
+ context.device = out_device;
+
+ const id<MTLCommandQueue> queue = [gpu newCommandQueue];
+ context.command_queue = queue;
+
+ return true;
+}
+void gpu_device_destroy() {}
+
+// --- Render Pipeline
+gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) {
+ TRACE("GPU Graphics Pipeline creation");
+ // Allocate
+ // gpu_pipeline_layout* layout =
+ // pipeline_layout_pool_alloc(&context.gpu_pools.pipeline_layouts, NULL);
+ gpu_pipeline* pipeline = pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL);
+
+ WARN("About to try loading metallib");
+ assert(description.vs.is_combined_vert_frag);
+ // Ignore fragment shader data, as vert shader data contains both
+ NSError *err = 0x0; // TEMPORARY
+ NSString *myNSString = [NSString stringWithUTF8String:(char*)description.vs.filepath.buf];
+ id<MTLLibrary> default_library = [context.device->id newLibraryWithFile:myNSString error:&err];
+ assert(default_library);
+
+ // setup vertex and fragment shaders
+ id<MTLFunction> ren_vert = [default_library newFunctionWithName:@"basic_vertex"];
+ assert(ren_vert);
+ id<MTLFunction> ren_frag = [default_library newFunctionWithName:@"basic_fragment"];
+ assert(ren_frag);
+
+ // create pipeline descriptor
+ @autoreleasepool {
+ NSError *err = 0x0;
+ MTLRenderPipelineDescriptor *pld = [[MTLRenderPipelineDescriptor alloc] init];
+ NSString *pipeline_name = [NSString stringWithUTF8String: description.debug_name];
+ pld.label = pipeline_name;
+ pld.vertexFunction = ren_vert;
+ pld.fragmentFunction = ren_frag;
+ pld.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm;
+ pld.colorAttachments[0].blendingEnabled = YES;
+
+ MTLDepthStencilDescriptor *depthStencilDescriptor = [MTLDepthStencilDescriptor new];
+ depthStencilDescriptor.depthCompareFunction = MTLCompareFunctionLess;
+ depthStencilDescriptor.depthWriteEnabled = YES;
+ pld.depthAttachmentPixelFormat = MTLPixelFormatDepth32Float_Stencil8;
+
+ id<MTLDepthStencilState> depth_descriptor = [context.device->id newDepthStencilStateWithDescriptor:depthStencilDescriptor];
+ // FIXME: state->depth_state = depth_descriptor;
+
+ id<MTLRenderPipelineState> pipeline_state = [context.device->id newRenderPipelineStateWithDescriptor:pld error:&err];
+ TRACE("created renderpipelinestate");
+ pipeline->pipeline_state = pipeline_state;
+
+ }
+
+ return pipeline;
+}
+void gpu_pipeline_destroy(gpu_pipeline* pipeline) {}
+
+// --- Renderpass
+gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {
+ gpu_renderpass* renderpass = renderpass_pool_alloc(&context.gpu_pools.renderpasses, NULL);
+
+ // TODO: Configure based on description
+ // set up render pass
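+  // NOTE: -nextDrawable can block until the layer has a free drawable, and
+  // caching it here ties this renderpass descriptor to one frame's drawable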
+ context.surface = [context.swapchain->swapchain nextDrawable];
+ MTLRenderPassDescriptor *renderPassDescriptor = [[MTLRenderPassDescriptor alloc] init];
+ MTLRenderPassColorAttachmentDescriptor *cd = renderPassDescriptor.colorAttachments[0];
+ [cd setTexture:context.surface.texture];
+ [cd setLoadAction:MTLLoadActionClear];
+ MTLClearColor clearColor = MTLClearColorMake(0.1, 0.1, 0.0, 1.0);
+ [cd setClearColor:clearColor];
+ [cd setStoreAction:MTLStoreActionStore];
+
+ renderpass->rpass_descriptor = renderPassDescriptor;
+
+ return renderpass;
+}
+
+void gpu_renderpass_destroy(gpu_renderpass* pass) {}
+
+// --- Swapchain
+bool gpu_swapchain_create(gpu_swapchain* out_swapchain) {
+ TRACE("GPU Swapchain creation");
+ CAMetalLayer *swapchain = [CAMetalLayer layer];
+ swapchain.device = context.device->id;
+ swapchain.opaque = YES;
+ out_swapchain->swapchain = swapchain;
+
+ // set swapchain for the window
+ context.metal_window.contentView.layer = swapchain;
+ context.metal_window.contentView.wantsLayer = YES;
+
+ context.swapchain = out_swapchain;
+ return true;
+}
+void gpu_swapchain_destroy(gpu_swapchain* swapchain) {}
+
+// --- Command buffer
+gpu_cmd_encoder gpu_cmd_encoder_create() {
+ id <MTLCommandBuffer> cmd_buffer = [context.command_queue commandBuffer];
+
+ return (gpu_cmd_encoder) {
+ .cmd_buffer = cmd_buffer
+ };
+}
+void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder) {}
+void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder) { /* no-op */ }
+void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass) {
+ DEBUG("Create Render Command Encoder");
+ id<MTLRenderCommandEncoder> render_encoder = [encoder->cmd_buffer renderCommandEncoderWithDescriptor:renderpass->rpass_descriptor];
+ encoder->render_encoder = render_encoder;
+ // [encoder setDepthStencilState:state->depth_state];
+}
+void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder) {}
+void gpu_cmd_encoder_begin_compute() {}
+gpu_cmd_encoder* gpu_get_default_cmd_encoder() {
+ return &context.main_command_buf;
+}
+
+/** @brief Finish recording and return a command buffer that can be submitted to a queue */
+gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder) { return (gpu_cmd_buffer){ 0 }; /* TODO: stub */ }
+
+void gpu_queue_submit(gpu_cmd_buffer* buffer) {}
+
+void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
+ buffer_handle dst, u64 dst_offset, u64 copy_size);
+void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size);
+
+void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
+ u64 dst_offset, u64 copy_size);
+void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst);
+
+void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline) {}
+void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data) {}
+void encode_set_default_settings(gpu_cmd_encoder* encoder) {
+ [encoder->render_encoder setCullMode:MTLCullModeBack];
+}
+void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* vertex_buf = BUFFER_GET(buf);
+ [encoder->render_encoder setVertexBuffer:vertex_buf->id offset:0 atIndex:0];
+}
+void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {}
+void encode_set_bind_group() {}
+void encode_draw(gpu_cmd_encoder* encoder) {}
+void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) {}
+void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {}
+
+buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
+ const void* data) {
+ buffer_handle handle;
+ gpu_buffer* buffer = buffer_pool_alloc(&context.resource_pools->buffers, &handle);
+ buffer->size = size;
+
+  id<MTLBuffer> mtl_buffer = [context.device->id newBufferWithBytes:data
+                                                              length:size
+                                                             options:MTLResourceStorageModeShared];
+  buffer->id = mtl_buffer;  // keep the Metal buffer on the pooled struct so BUFFER_GET can find it
+  return handle;
+}
+void gpu_buffer_destroy(buffer_handle buffer) {}
+void gpu_buffer_upload(const void* data) {}
+
+texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {
+  return (texture_handle){ 0 };  // TODO: stub
+}
+void gpu_texture_destroy(texture_handle handle) {}
+void gpu_texture_upload(texture_handle texture, const void* data) {}
+
+bool gpu_backend_begin_frame() {
+ context.main_command_buf.cmd_buffer = [context.command_queue commandBuffer];
+ return true;
+ }
+void gpu_backend_end_frame() {}
+void gpu_temp_draw(size_t n_verts) {}
+
+#endif
\ No newline at end of file
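For orientation, a minimal sketch of the per-frame call flow a renderer might drive against the RAL functions stubbed above; `rpass`, `pipeline`, `vbuf`, and the `PIPELINE_GRAPHICS` enum value are illustrative placeholders created or defined elsewhere, not part of this file:

    gpu_backend_begin_frame();
    gpu_cmd_encoder* enc = gpu_get_default_cmd_encoder();
    gpu_cmd_encoder_begin_render(enc, rpass);        // rpass: created at startup
    encode_bind_pipeline(enc, PIPELINE_GRAPHICS, pipeline);
    encode_set_default_settings(enc);                // back-face culling
    encode_set_vertex_buffer(enc, vbuf);
    encode_draw(enc);                                // draw recording (stubbed here)
    gpu_cmd_encoder_end_render(enc);
    gpu_backend_end_frame();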
diff --git a/archive/src/render/archive/backends/opengl/backend_opengl.c b/archive/src/render/archive/backends/opengl/backend_opengl.c
new file mode 100644
index 0000000..43105e2
--- /dev/null
+++ b/archive/src/render/archive/backends/opengl/backend_opengl.c
@@ -0,0 +1,521 @@
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include "colours.h"
+#include "maths.h"
+#include "opengl_helpers.h"
+#include "ral_types.h"
+#define CEL_REND_BACKEND_OPENGL
+#if defined(CEL_REND_BACKEND_OPENGL)
+#include <assert.h>
+#include <stdlib.h>
+
+#include "backend_opengl.h"
+#include "defines.h"
+#include "file.h"
+#include "log.h"
+#include "maths_types.h"
+#include "ral.h"
+
+#include <glad/glad.h>
+#include <glfw3.h>
+
+typedef struct opengl_context {
+ GLFWwindow* window;
+ arena pool_arena;
+ gpu_cmd_encoder command_buffer;
+ gpu_backend_pools gpu_pools;
+ struct resource_pools* resource_pools;
+} opengl_context;
+
+static opengl_context context;
+
+struct GLFWwindow;
+
+bool gpu_backend_init(const char* window_name, struct GLFWwindow* window) {
+ INFO("loading OpenGL backend");
+
+ memset(&context, 0, sizeof(opengl_context));
+ context.window = window;
+
+ size_t pool_buffer_size = 1024 * 1024;
+ context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size);
+
+ backend_pools_init(&context.pool_arena, &context.gpu_pools);
+ context.resource_pools = malloc(sizeof(struct resource_pools));
+ resource_pools_init(&context.pool_arena, context.resource_pools);
+
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
+
+ // glad: load all opengl function pointers
+ if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
+ ERROR("Failed to initialise GLAD \n");
+ return false;
+ }
+
+ glEnable(GL_DEPTH_TEST);
+ glEnable(GL_CULL_FACE);
+
+ return true;
+}
+
+// --- Render Pipeline
+gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) {
+ gpu_pipeline* pipeline = pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL);
+
+ // Create shader program
+ u32 shader_id = shader_create_separate(description.vs.filepath.buf, description.fs.filepath.buf);
+ pipeline->shader_id = shader_id;
+
+ // Vertex format
+ pipeline->vertex_desc = description.vertex_desc;
+
+ // Allocate uniform buffers if needed
+ u32 ubo_count = 0;
+ // printf("data layouts %d\n", description.data_layouts_count);
+ for (u32 layout_i = 0; layout_i < description.data_layouts_count; layout_i++) {
+ shader_data_layout sdl = description.data_layouts[layout_i].shader_data_get_layout(NULL);
+ TRACE("Got shader data layout %d's bindings! . found %d", layout_i, sdl.bindings_count);
+
+ for (u32 binding_j = 0; binding_j < sdl.bindings_count; binding_j++) {
+ u32 binding_id = binding_j;
+ assert(binding_id < MAX_PIPELINE_UNIFORM_BUFFERS);
+ shader_binding binding = sdl.bindings[binding_j];
+ if (binding.type == SHADER_BINDING_BYTES) {
+ static u32 s_binding_point = 0;
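+        // function-local static: UBO binding points are handed out sequentially
+        // and persist across every pipeline this backend creates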
+ buffer_handle ubo_handle =
+ gpu_buffer_create(binding.data.bytes.size, CEL_BUFFER_UNIFORM, CEL_BUFFER_FLAG_GPU,
+ NULL); // no data right now
+ pipeline->uniform_bindings[ubo_count++] = ubo_handle;
+ gpu_buffer* ubo_buf = BUFFER_GET(ubo_handle);
+
+ i32 blockIndex = glGetUniformBlockIndex(pipeline->shader_id, binding.label);
+ printf("Block index for %s: %d", binding.label, blockIndex);
+ if (blockIndex < 0) {
+ WARN("Couldn't retrieve block index for uniform block '%s'", binding.label);
+ } else {
+ // DEBUG("Retrived block index %d for %s", blockIndex, binding.label);
+ }
+      i32 blocksize;  // glGetActiveUniformBlockiv writes a GLint
+ glGetActiveUniformBlockiv(pipeline->shader_id, blockIndex, GL_UNIFORM_BLOCK_DATA_SIZE,
+ &blocksize);
+ printf("\t with size %d bytes\n", blocksize);
+
+ glBindBufferBase(GL_UNIFORM_BUFFER, s_binding_point, ubo_buf->id.ubo);
+ if (blockIndex != GL_INVALID_INDEX) {
+ glUniformBlockBinding(pipeline->shader_id, blockIndex, s_binding_point);
+ }
+ ubo_buf->ubo_binding_point = s_binding_point++;
+ ubo_buf->name = binding.label;
+ assert(s_binding_point < GL_MAX_UNIFORM_BUFFER_BINDINGS);
+ }
+ }
+ }
+ pipeline->uniform_count = ubo_count;
+
+ pipeline->renderpass = description.renderpass;
+ pipeline->wireframe = description.wireframe;
+
+ return pipeline;
+}
+void gpu_pipeline_destroy(gpu_pipeline* pipeline) {}
+
+// --- Renderpass
+gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {
+ gpu_renderpass* renderpass = renderpass_pool_alloc(&context.gpu_pools.renderpasses, NULL);
+ memcpy(&renderpass->description, description, sizeof(gpu_renderpass_desc));
+ bool default_framebuffer = description->default_framebuffer;
+
+ if (!default_framebuffer) {
+ GLuint gl_fbo_id;
+ glGenFramebuffers(1, &gl_fbo_id);
+ renderpass->fbo = gl_fbo_id;
+ } else {
+ renderpass->fbo = OPENGL_DEFAULT_FRAMEBUFFER;
+ assert(!description->has_color_target);
+ assert(!description->has_depth_stencil);
+ }
+ glBindFramebuffer(GL_FRAMEBUFFER, renderpass->fbo);
+
+ if (description->has_color_target && !default_framebuffer) {
+ gpu_texture* colour_attachment = TEXTURE_GET(description->color_target);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ colour_attachment->id, 0);
+ }
+ if (description->has_depth_stencil && !default_framebuffer) {
+ gpu_texture* depth_attachment = TEXTURE_GET(description->depth_stencil);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_attachment->id,
+ 0);
+ }
+
+ if (description->has_depth_stencil && !description->has_color_target) {
+ glDrawBuffer(GL_NONE);
+ glReadBuffer(GL_NONE);
+ }
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0); // reset to default framebuffer
+
+ return renderpass;
+}
+void gpu_renderpass_destroy(gpu_renderpass* pass) { glDeleteFramebuffers(1, &pass->fbo); }
+
+// --- Command buffer
+gpu_cmd_encoder gpu_cmd_encoder_create() {
+ gpu_cmd_encoder encoder = { 0 };
+ return encoder;
+}
+void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder) {}
+void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder) {}
+void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass) {
+ glBindFramebuffer(GL_FRAMEBUFFER, renderpass->fbo);
+ rgba clear_colour = STONE_800;
+ glClearColor(clear_colour.r, clear_colour.g, clear_colour.b, 1.0f);
+ if (renderpass->description.has_depth_stencil) {
+ glClear(GL_DEPTH_BUFFER_BIT);
+ } else {
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ }
+}
+void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder) { glBindFramebuffer(GL_FRAMEBUFFER, 0); }
+void gpu_cmd_encoder_begin_compute() {}
+gpu_cmd_encoder* gpu_get_default_cmd_encoder() { return &context.command_buffer; }
+
+/** @brief Finish recording and return a command buffer that can be submitted to a queue */
+gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder) { return (gpu_cmd_buffer){ 0 }; /* TODO: stub */ }
+
+void gpu_queue_submit(gpu_cmd_buffer* buffer) {}
+
+// --- Data copy commands
+/** @brief Copy data from one buffer to another */
+void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
+ buffer_handle dst, u64 dst_offset, u64 copy_size) {}
+/** @brief Upload CPU-side data as array of bytes to a GPU buffer */
+void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size) {
+ // TODO: finish implementing this
+ gpu_buffer* buf = BUFFER_GET(gpu_buf);
+}
+
+/** @brief Copy data from buffer to buffer using a one time submit command buffer and a wait */
+void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
+ u64 dst_offset, u64 copy_size) {}
+/** @brief Copy data from buffer to an image using a one time submit command buffer */
+void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst) {}
+
+// --- Render commands
+void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline) {
+ encoder->pipeline = pipeline;
+
+ if (pipeline->wireframe) {
+ glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
+ } else {
+ glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
+ }
+
+ // In OpenGL binding a pipeline is more or less equivalent to just setting the shader
+ glUseProgram(pipeline->shader_id);
+}
+void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data) {
+ shader_data_layout sdl = data->shader_data_get_layout(data->data);
+ // printf("Binding %s shader data\n", sdl.name);
+
+ for (u32 i = 0; i < sdl.bindings_count; i++) {
+ shader_binding binding = sdl.bindings[i];
+ /* print_shader_binding(binding); */
+
+ if (binding.type == SHADER_BINDING_BYTES) {
+ buffer_handle b;
+ gpu_buffer* ubo_buf;
+ bool found = false;
+      for (u32 j = 0; j < encoder->pipeline->uniform_count; j++) {
+        b = encoder->pipeline->uniform_bindings[j];
+ ubo_buf = BUFFER_GET(b);
+ assert(ubo_buf->name != NULL);
+ if (strcmp(ubo_buf->name, binding.label) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ ERROR("Couldnt find uniform buffer object!!");
+ }
+
+ i32 blockIndex = glGetUniformBlockIndex(encoder->pipeline->shader_id, binding.label);
+ if (blockIndex < 0) {
+ WARN("Couldn't retrieve block index for uniform block '%s'", binding.label);
+ } else {
+ // DEBUG("Retrived block index %d for %s", blockIndex, binding.label);
+ }
+
+ glBindBuffer(GL_UNIFORM_BUFFER, ubo_buf->id.ubo);
+ glBufferSubData(GL_UNIFORM_BUFFER, 0, ubo_buf->size, binding.data.bytes.data);
+
+ } else if (binding.type == SHADER_BINDING_TEXTURE) {
+ gpu_texture* tex = TEXTURE_GET(binding.data.texture.handle);
+ GLint tex_slot = glGetUniformLocation(encoder->pipeline->shader_id, binding.label);
+ // printf("%d slot \n", tex_slot);
+      if (tex_slot < 0) {  // glGetUniformLocation returns -1 when the name is not found
+ WARN("Invalid binding label for texture %s - couldn't fetch texture slot uniform",
+ binding.label);
+ }
+ glUniform1i(tex_slot, i);
+ glActiveTexture(GL_TEXTURE0 + i);
+ glBindTexture(GL_TEXTURE_2D, tex->id);
+ }
+ }
+}
+void encode_set_default_settings(gpu_cmd_encoder* encoder) {}
+void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
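+  // VAOs are created lazily: the first bind of a vertex buffer builds a VAO
+  // from the currently bound pipeline's vertex_description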
+ gpu_buffer* buffer = BUFFER_GET(buf);
+ if (buffer->vao == 0) { // if no VAO for this vertex buffer, create it
+ INFO("Setting up VAO");
+ buffer->vao = opengl_bindcreate_vao(buffer, encoder->pipeline->vertex_desc);
+ }
+ glBindVertexArray(buffer->vao);
+}
+void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* buffer = BUFFER_GET(buf);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer->id.ibo);
+}
+void encode_draw(gpu_cmd_encoder* encoder, u64 count) { glDrawArrays(GL_TRIANGLES, 0, count); }
+void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) {
+ /* printf("Draw %ld indices\n", index_count); */
+ glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, 0);
+}
+void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {}
+
+// --- Buffers
+buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
+ const void* data) {
+ // "allocating" the cpu-side buffer struct
+ buffer_handle handle;
+ gpu_buffer* buffer = buffer_pool_alloc(&context.resource_pools->buffers, &handle);
+ buffer->size = size;
+ buffer->vao = 0; // When we create a new buffer, there will be no VAO.
+
+ // Opengl buffer
+ GLuint gl_buffer_id;
+ glGenBuffers(1, &gl_buffer_id);
+
+ GLenum gl_buf_type;
+ GLenum gl_buf_usage = GL_STATIC_DRAW;
+
+ switch (buf_type) {
+ case CEL_BUFFER_UNIFORM:
+ DEBUG("Creating Uniform buffer");
+ gl_buf_type = GL_UNIFORM_BUFFER;
+ /* gl_buf_usage = GL_DYNAMIC_DRAW; */
+ buffer->id.ubo = gl_buffer_id;
+ break;
+ case CEL_BUFFER_DEFAULT:
+ case CEL_BUFFER_VERTEX:
+ DEBUG("Creating Vertex buffer");
+ gl_buf_type = GL_ARRAY_BUFFER;
+ buffer->id.vbo = gl_buffer_id;
+ break;
+ case CEL_BUFFER_INDEX:
+ DEBUG("Creating Index buffer");
+ gl_buf_type = GL_ELEMENT_ARRAY_BUFFER;
+ buffer->id.ibo = gl_buffer_id;
+ break;
+ default:
+ WARN("Unimplemented gpu_buffer_type provided %s", buffer_type_names[buf_type]);
+ break;
+ }
+ // bind buffer
+ glBindBuffer(gl_buf_type, gl_buffer_id);
+
+  if (data) {
+    TRACE("Upload data (%llu bytes) as part of buffer creation", (unsigned long long)size);
+    glBufferData(gl_buf_type, buffer->size, data, gl_buf_usage);
+  } else {
+    TRACE("Allocating but not uploading (%llu bytes)", (unsigned long long)size);
+    glBufferData(gl_buf_type, buffer->size, NULL, gl_buf_usage);
+  }
+
+ glBindBuffer(gl_buf_type, 0);
+
+ return handle;
+}
+
+texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {
+ // "allocating" the cpu-side struct
+ texture_handle handle;
+ gpu_texture* texture = texture_pool_alloc(&context.resource_pools->textures, &handle);
+ DEBUG("Allocated texture with handle %d", handle.raw);
+
+ GLuint gl_texture_id;
+ glGenTextures(1, &gl_texture_id);
+ texture->id = gl_texture_id;
+
+ glBindTexture(GL_TEXTURE_2D, gl_texture_id);
+
+ GLint internal_format =
+ desc.format == CEL_TEXTURE_FORMAT_DEPTH_DEFAULT ? GL_DEPTH_COMPONENT : GL_RGB;
+ GLenum format = desc.format == CEL_TEXTURE_FORMAT_DEPTH_DEFAULT ? GL_DEPTH_COMPONENT : GL_RGBA;
+ GLenum data_type = desc.format == CEL_TEXTURE_FORMAT_DEPTH_DEFAULT ? GL_FLOAT : GL_UNSIGNED_BYTE;
+
+ if (desc.format == CEL_TEXTURE_FORMAT_DEPTH_DEFAULT) {
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
+ } else {
+ // set the texture wrapping parameters
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ // set texture filtering parameters
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ }
+
+ if (data) {
+ glTexImage2D(GL_TEXTURE_2D, 0, internal_format, desc.extents.x, desc.extents.y, 0, format,
+ data_type, data);
+ glGenerateMipmap(GL_TEXTURE_2D);
+ } else {
+ WARN("No image data provided");
+ glTexImage2D(GL_TEXTURE_2D, 0, internal_format, desc.extents.x, desc.extents.y, 0, format,
+ data_type, NULL);
+ }
+
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ return handle;
+}
+
+void gpu_texture_destroy(texture_handle handle) {}
+void gpu_texture_upload(texture_handle texture, const void* data) {}
+
+// --- Vertex formats
+bytebuffer vertices_as_bytebuffer(arena* a, vertex_format format, vertex_darray* vertices) {
+  return (bytebuffer){ 0 };  // TODO: stub
+}
+
+// --- TEMP
+bool gpu_backend_begin_frame() {
+ glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ return true;
+}
+void gpu_backend_end_frame() {
+ // TODO: Reset all bindings
+ glfwSwapBuffers(context.window);
+}
+void gpu_temp_draw(size_t n_verts) {}
+
+u32 shader_create_separate(const char* vert_shader, const char* frag_shader) {
+ INFO("Load shaders at %s and %s", vert_shader, frag_shader);
+ int success;
+ char info_log[512];
+
+ u32 vertex = glCreateShader(GL_VERTEX_SHADER);
+ const char* vertex_shader_src = string_from_file(vert_shader);
+ if (vertex_shader_src == NULL) {
+ ERROR("EXIT: couldnt load shader");
+ exit(-1);
+ }
+ glShaderSource(vertex, 1, &vertex_shader_src, NULL);
+ glCompileShader(vertex);
+ glGetShaderiv(vertex, GL_COMPILE_STATUS, &success);
+ if (!success) {
+ glGetShaderInfoLog(vertex, 512, NULL, info_log);
+ printf("%s\n", info_log);
+ ERROR("EXIT: vertex shader compilation failed");
+ exit(-1);
+ }
+
+ // fragment shader
+ u32 fragment = glCreateShader(GL_FRAGMENT_SHADER);
+ const char* fragment_shader_src = string_from_file(frag_shader);
+ if (fragment_shader_src == NULL) {
+ ERROR("EXIT: couldnt load shader");
+ exit(-1);
+ }
+ glShaderSource(fragment, 1, &fragment_shader_src, NULL);
+ glCompileShader(fragment);
+ glGetShaderiv(fragment, GL_COMPILE_STATUS, &success);
+ if (!success) {
+ glGetShaderInfoLog(fragment, 512, NULL, info_log);
+ printf("%s\n", info_log);
+ ERROR("EXIT: fragment shader compilation failed");
+ exit(-1);
+ }
+
+ u32 shader_prog;
+ shader_prog = glCreateProgram();
+
+ glAttachShader(shader_prog, vertex);
+ glAttachShader(shader_prog, fragment);
+ glLinkProgram(shader_prog);
+ glDeleteShader(vertex);
+ glDeleteShader(fragment);
+ free((char*)vertex_shader_src);
+ free((char*)fragment_shader_src);
+
+ return shader_prog;
+}
+
+inline void uniform_vec3f(u32 program_id, const char* uniform_name, vec3* value) {
+ glUniform3fv(glGetUniformLocation(program_id, uniform_name), 1, &value->x);
+}
+inline void uniform_f32(u32 program_id, const char* uniform_name, f32 value) {
+ glUniform1f(glGetUniformLocation(program_id, uniform_name), value);
+}
+inline void uniform_i32(u32 program_id, const char* uniform_name, i32 value) {
+ glUniform1i(glGetUniformLocation(program_id, uniform_name), value);
+}
+inline void uniform_mat4f(u32 program_id, const char* uniform_name, mat4* value) {
+ glUniformMatrix4fv(glGetUniformLocation(program_id, uniform_name), 1, GL_FALSE, value->data);
+}
+
+// void clear_screen(vec3 colour) {
+// glClearColor(colour.x, colour.y, colour.z, 1.0f);
+// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+// }
+
+// void texture_data_upload(texture *tex) {
+// printf("Texture name %s\n", tex->name);
+// TRACE("Upload texture data");
+// u32 texture_id;
+// glGenTextures(1, &texture_id);
+// glBindTexture(GL_TEXTURE_2D, texture_id);
+// tex->texture_id = texture_id;
+
+// // set the texture wrapping parameters
+// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+// GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
+// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+// // set texture filtering parameters
+// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+
+// glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex->width, tex->height, 0, tex->channel_type,
+// GL_UNSIGNED_BYTE, tex->image_data);
+// glGenerateMipmap(GL_TEXTURE_2D);
+// DEBUG("Freeing texture image data after uploading to GPU");
+// // stbi_image_free(tex->image_data); // data is on gpu now so we dont need it around
+// }
+
+// void bind_texture(shader s, texture *tex, u32 slot) {
+// // printf("bind texture slot %d with texture id %d \n", slot, tex->texture_id);
+// glActiveTexture(GL_TEXTURE0 + slot);
+// glBindTexture(GL_TEXTURE_2D, tex->texture_id);
+// }
+
+// void bind_mesh_vertex_buffer(void *_backend, mesh *mesh) { glBindVertexArray(mesh->vao); }
+
+// static inline GLenum to_gl_prim_topology(enum cel_primitive_topology primitive) {
+// switch (primitive) {
+// case CEL_PRIMITIVE_TOPOLOGY_TRIANGLE:
+// return GL_TRIANGLES;
+// case CEL_PRIMITIVE_TOPOLOGY_POINT:
+// case CEL_PRIMITIVE_TOPOLOGY_LINE:
+// case CEL_PRIMITIVE_TOPOLOGY_LINE_STRIP:
+// case CEL_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
+// case CEL_PRIMITIVE_TOPOLOGY_COUNT:
+// break;
+// }
+// }
+#endif
diff --git a/archive/src/render/archive/backends/opengl/backend_opengl.h b/archive/src/render/archive/backends/opengl/backend_opengl.h
new file mode 100644
index 0000000..14b44af
--- /dev/null
+++ b/archive/src/render/archive/backends/opengl/backend_opengl.h
@@ -0,0 +1,68 @@
+#pragma once
+
+#ifdef CEL_REND_BACKEND_OPENGL
+
+#include "defines.h"
+#include "maths_types.h"
+#include "ral.h"
+#include "ral_types.h"
+
+#define MAX_PIPELINE_UNIFORM_BUFFERS 32
+
+#define OPENGL_DEFAULT_FRAMEBUFFER 0
+
+typedef struct gpu_swapchain {
+ u32x2 dimensions;
+} gpu_swapchain;
+typedef struct gpu_device {
+  void* pad;  // no GL-specific device state; placeholder keeps the struct non-empty
+} gpu_device;
+typedef struct gpu_pipeline_layout {
+  void* pad;
+} gpu_pipeline_layout;
+typedef struct gpu_pipeline {
+ u32 shader_id;
+ gpu_renderpass* renderpass;
+ vertex_description vertex_desc;
+ buffer_handle uniform_bindings[MAX_PIPELINE_UNIFORM_BUFFERS];
+ u32 uniform_count;
+ bool wireframe;
+} gpu_pipeline;
+typedef struct gpu_renderpass {
+ u32 fbo;
+ gpu_renderpass_desc description;
+} gpu_renderpass;
+typedef struct gpu_cmd_encoder {
+ gpu_pipeline* pipeline;
+} gpu_cmd_encoder; // Recording
+typedef struct gpu_cmd_buffer {
+ void* pad;
+} gpu_cmd_buffer; // Ready for submission
+
+typedef struct gpu_buffer {
+ union {
+ u32 vbo;
+ u32 ibo;
+ u32 ubo;
+ } id;
+ union {
+ u32 vao;
+    u32 ubo_binding_point;
+ }; // Optional
+ char* name;
+ u64 size;
+} gpu_buffer;
+typedef struct gpu_texture {
+ u32 id;
+ void* pad;
+} gpu_texture;
+
+typedef struct opengl_support {
+  void* pad;  // placeholder; empty structs are a compiler extension in C
+} opengl_support;
+
+u32 shader_create_separate(const char* vert_shader, const char* frag_shader);
+
+void uniform_vec3f(u32 program_id, const char* uniform_name, vec3* value);
+void uniform_f32(u32 program_id, const char* uniform_name, f32 value);
+void uniform_i32(u32 program_id, const char* uniform_name, i32 value);
+void uniform_mat4f(u32 program_id, const char* uniform_name, mat4* value);
+#endif
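For reference, a minimal sketch of the loose-uniform helpers this header declares; the shader paths and uniform names are illustrative:

    u32 program = shader_create_separate("assets/shaders/basic.vert",
                                         "assets/shaders/basic.frag");
    glUseProgram(program);
    uniform_i32(program, "u_albedo_map", 0);   // sampler bound to texture unit 0
    uniform_f32(program, "u_exposure", 1.0f);
    vec3 light_pos = { 0 };
    uniform_vec3f(program, "u_light_pos", &light_pos);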
diff --git a/archive/src/render/archive/backends/vulkan/README.md b/archive/src/render/archive/backends/vulkan/README.md
new file mode 100644
index 0000000..220ed64
--- /dev/null
+++ b/archive/src/render/archive/backends/vulkan/README.md
@@ -0,0 +1 @@
+# Vulkan Backend Overview
\ No newline at end of file
diff --git a/archive/src/render/archive/backends/vulkan/backend_vulkan.c b/archive/src/render/archive/backends/vulkan/backend_vulkan.c
new file mode 100644
index 0000000..8801230
--- /dev/null
+++ b/archive/src/render/archive/backends/vulkan/backend_vulkan.c
@@ -0,0 +1,1705 @@
+#include "defines.h"
+#if defined(CEL_REND_BACKEND_VULKAN)
+
+#define GLFW_INCLUDE_VULKAN
+#include <glfw3.h>
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vulkan/vk_platform.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+#include "backend_vulkan.h"
+#include "buf.h"
+#include "darray.h"
+#include "maths_types.h"
+#include "mem.h"
+#include "ral_types.h"
+#include "str.h"
+#include "vulkan_helpers.h"
+
+#include "file.h"
+#include "log.h"
+#include "ral.h"
+#include "utils.h"
+
+// TEMP
+#define SCREEN_WIDTH 1000
+#define SCREEN_HEIGHT 1000
+#define VULKAN_QUEUES_COUNT 2
+#define MAX_DESCRIPTOR_SETS 10
+
+const char* queue_names[VULKAN_QUEUES_COUNT] = { "GRAPHICS", "TRANSFER" };
+
+KITC_DECL_TYPED_ARRAY(VkDescriptorSet)
+
+typedef struct vulkan_context {
+ VkInstance instance;
+ VkAllocationCallbacks* allocator;
+ VkSurfaceKHR surface;
+ vulkan_swapchain_support_info swapchain_support;
+
+ arena temp_arena;
+ arena pool_arena;
+ gpu_device* device;
+ gpu_swapchain* swapchain;
+ u32 framebuffer_count;
+ VkFramebuffer*
+ swapchain_framebuffers; // TODO: Move this data into the swapchain as its own struct
+
+ u32 current_img_index;
+  u32 current_frame;  // index of the frame-in-flight currently being recorded
+ gpu_cmd_encoder main_cmd_bufs[MAX_FRAMES_IN_FLIGHT];
+ VkSemaphore image_available_semaphores[MAX_FRAMES_IN_FLIGHT];
+ VkSemaphore render_finished_semaphores[MAX_FRAMES_IN_FLIGHT];
+ VkFence in_flight_fences[MAX_FRAMES_IN_FLIGHT];
+
+ // HACK
+ VkRenderPass main_renderpass;
+
+ u32 screen_width;
+ u32 screen_height;
+ bool is_resizing;
+ GLFWwindow* window;
+
+ // Storage
+ gpu_buffer buffers[1024];
+ size_t buffer_count;
+ VkDescriptorSet_darray* free_set_queue;
+ struct resource_pools* resource_pools;
+ gpu_backend_pools gpu_pools;
+
+ VkDebugUtilsMessengerEXT vk_debugger;
+} vulkan_context;
+
+static vulkan_context context;
+
+// --- Function forward declarations
+
+void backend_pools_init(arena* a, gpu_backend_pools* backend_pools);
+
+/** @brief Enumerates and selects the most appropriate graphics device */
+bool select_physical_device(gpu_device* out_device);
+
+bool is_physical_device_suitable(VkPhysicalDevice device);
+
+queue_family_indices find_queue_families(VkPhysicalDevice device);
+
+bool create_logical_device(gpu_device* out_device);
+void create_swapchain_framebuffers();
+void create_sync_objects();
+void create_descriptor_pools();
+size_t vertex_attrib_size(vertex_attrib_type attr);
+
+VkShaderModule create_shader_module(str8 spirv);
+
+/** @brief Helper function for creating array of all extensions we want */
+cstr_darray* get_all_extensions();
+
+VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat format,
+ VkImageUsageFlags usage);
+void vulkan_transition_image_layout(gpu_texture* texture, VkFormat format, VkImageLayout old_layout,
+ VkImageLayout new_layout);
+
+// --- Handy macros
+#define BUFFER_GET(h) (buffer_pool_get(&context.resource_pools->buffers, h))
+#define TEXTURE_GET(h) (texture_pool_get(&context.resource_pools->textures, h))
+
+bool gpu_backend_init(const char* window_name, GLFWwindow* window) {
+ memset(&context, 0, sizeof(vulkan_context));
+ context.allocator = 0; // TODO: use an allocator
+ context.screen_width = SCREEN_WIDTH;
+ context.screen_height = SCREEN_HEIGHT;
+ context.window = window;
+ context.current_img_index = 0;
+ context.current_frame = 0;
+ context.free_set_queue = VkDescriptorSet_darray_new(100);
+
+ // Create an allocator
+ size_t temp_arena_size = 1024 * 1024;
+ context.temp_arena = arena_create(malloc(temp_arena_size), temp_arena_size);
+
+ size_t pool_buffer_size = 1024 * 1024;
+ context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size);
+
+ backend_pools_init(&context.pool_arena, &context.gpu_pools);
+
+ // Setup Vulkan instance
+ VkApplicationInfo app_info = { VK_STRUCTURE_TYPE_APPLICATION_INFO };
+ app_info.apiVersion = VK_API_VERSION_1_2;
+ app_info.pApplicationName = window_name;
+ app_info.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
+ app_info.pEngineName = "Celeritas Engine";
+ app_info.engineVersion = VK_MAKE_VERSION(1, 0, 0);
+
+ VkInstanceCreateInfo create_info = { VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO };
+ create_info.pApplicationInfo = &app_info;
+
+ // Extensions
+ cstr_darray* required_extensions = cstr_darray_new(2);
+ // cstr_darray_push(required_extensions, VK_KHR_SURFACE_EXTENSION_NAME);
+
+ uint32_t count;
+ const char** extensions = glfwGetRequiredInstanceExtensions(&count);
+ for (u32 i = 0; i < count; i++) {
+ cstr_darray_push(required_extensions, extensions[i]);
+ }
+
+ cstr_darray_push(required_extensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
+
+ DEBUG("Required extensions:");
+ for (u32 i = 0; i < cstr_darray_len(required_extensions); i++) {
+ DEBUG(" %s", required_extensions->data[i]);
+ }
+
+ create_info.enabledExtensionCount = cstr_darray_len(required_extensions);
+ create_info.ppEnabledExtensionNames = required_extensions->data;
+
+  // Validation layers (overwritten below once availability is verified)
+  create_info.enabledLayerCount = 0;
+  create_info.ppEnabledLayerNames = NULL;
+
+  INFO("Enabling validation layers");
+ cstr_darray* desired_validation_layers = cstr_darray_new(1);
+ cstr_darray_push(desired_validation_layers, "VK_LAYER_KHRONOS_validation");
+
+ u32 n_available_layers = 0;
+ VK_CHECK(vkEnumerateInstanceLayerProperties(&n_available_layers, 0));
+ TRACE("%d available layers", n_available_layers);
+ VkLayerProperties* available_layers =
+ arena_alloc(&context.temp_arena, n_available_layers * sizeof(VkLayerProperties));
+ VK_CHECK(vkEnumerateInstanceLayerProperties(&n_available_layers, available_layers));
+
+ for (int i = 0; i < cstr_darray_len(desired_validation_layers); i++) {
+ // look through layers to make sure we can find the ones we want
+ bool found = false;
+ for (int j = 0; j < n_available_layers; j++) {
+ if (str8_equals(str8_cstr_view(desired_validation_layers->data[i]),
+ str8_cstr_view(available_layers[j].layerName))) {
+ found = true;
+ TRACE("Found layer %s", desired_validation_layers->data[i]);
+ break;
+ }
+ }
+
+ if (!found) {
+ FATAL("Required validation is missing %s", desired_validation_layers->data[i]);
+ return false;
+ }
+ }
+ INFO("All validation layers are present");
+ create_info.enabledLayerCount = cstr_darray_len(desired_validation_layers);
+ create_info.ppEnabledLayerNames = desired_validation_layers->data;
+
+ VkResult result = vkCreateInstance(&create_info, NULL, &context.instance);
+ if (result != VK_SUCCESS) {
+ ERROR("vkCreateInstance failed with result: %u", result);
+ return false;
+ }
+ TRACE("Vulkan Instance created");
+
+ DEBUG("Creating Vulkan debugger");
+ u32 log_severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+ VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {
+ VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT
+ };
+ debug_create_info.messageSeverity = log_severity;
+ debug_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+ debug_create_info.pfnUserCallback = vk_debug_callback;
+
+ PFN_vkCreateDebugUtilsMessengerEXT func =
+ (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(context.instance,
+ "vkCreateDebugUtilsMessengerEXT");
+ assert(func);
+ VK_CHECK(func(context.instance, &debug_create_info, context.allocator, &context.vk_debugger));
+ DEBUG("Vulkan Debugger created");
+
+ // Surface creation
+ VkSurfaceKHR surface;
+ VK_CHECK(glfwCreateWindowSurface(context.instance, window, NULL, &surface));
+ context.surface = surface;
+ TRACE("Vulkan Surface created");
+
+ return true;
+}
+
+void gpu_backend_shutdown() {
+ gpu_swapchain_destroy(context.swapchain);
+
+ vkDestroySurfaceKHR(context.instance, context.surface, context.allocator);
+ vkDestroyInstance(context.instance, context.allocator);
+ arena_free_storage(&context.temp_arena);
+}
+
+bool gpu_device_create(gpu_device* out_device) {
+  // First things first, store this pointer from the renderer
+ context.device = out_device;
+
+ arena_save savept = arena_savepoint(&context.temp_arena);
+ // Physical device
+ if (!select_physical_device(out_device)) {
+ return false;
+ }
+ TRACE("Physical device selected");
+
+ // Logical device & Queues
+ create_logical_device(out_device);
+
+ // Create the command pool
+ VkCommandPoolCreateInfo pool_create_info = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO };
+ pool_create_info.queueFamilyIndex = out_device->queue_family_indicies.graphics_family_index;
+ pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ vkCreateCommandPool(out_device->logical_device, &pool_create_info, context.allocator,
+ &out_device->pool);
+ TRACE("Command Pool created");
+
+ // Synchronisation objects
+ create_sync_objects();
+ TRACE("Synchronisation primitives created");
+
+ arena_rewind(savept); // Free any temp data
+ return true;
+}
+
+bool gpu_swapchain_create(gpu_swapchain* out_swapchain) {
+ context.swapchain = out_swapchain;
+
+ out_swapchain->swapchain_arena = arena_create(malloc(1024), 1024);
+
+ vulkan_device_query_swapchain_support(context.device->physical_device, context.surface,
+ &context.swapchain_support);
+ vulkan_swapchain_support_info swapchain_support = context.swapchain_support;
+
+ // TODO: custom swapchain extents VkExtent2D swapchain_extent = { width, height };
+
+ VkSurfaceFormatKHR image_format = choose_swapchain_format(&swapchain_support);
+ out_swapchain->image_format = image_format;
+ VkPresentModeKHR present_mode = VK_PRESENT_MODE_FIFO_KHR; // guaranteed to be implemented
+ out_swapchain->present_mode = present_mode;
+
+ u32 image_count = swapchain_support.capabilities.minImageCount + 1;
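+  // one more than the minimum so acquisition doesn't stall on the driver;
+  // NOTE: a fully robust version would also clamp against maxImageCount when
+  // that field is nonzero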
+ out_swapchain->image_count = image_count;
+
+ VkSwapchainCreateInfoKHR swapchain_create_info = { VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR };
+ swapchain_create_info.surface = context.surface;
+ swapchain_create_info.minImageCount = image_count;
+ swapchain_create_info.imageFormat = image_format.format;
+ swapchain_create_info.imageColorSpace = image_format.colorSpace;
+ swapchain_create_info.imageExtent = swapchain_support.capabilities.currentExtent;
+ swapchain_create_info.imageArrayLayers = 1;
+ swapchain_create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ swapchain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ swapchain_create_info.queueFamilyIndexCount = 0;
+ swapchain_create_info.pQueueFamilyIndices = NULL;
+
+ swapchain_create_info.preTransform = swapchain_support.capabilities.currentTransform;
+ swapchain_create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ swapchain_create_info.presentMode = present_mode;
+ swapchain_create_info.clipped = VK_TRUE;
+ swapchain_create_info.oldSwapchain = VK_NULL_HANDLE;
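+  // VK_NULL_HANDLE forces a from-scratch swapchain; passing the previous
+  // handle here would let the driver reuse resources across a resize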
+
+ out_swapchain->extent = swapchain_support.capabilities.currentExtent;
+
+ VK_CHECK(vkCreateSwapchainKHR(context.device->logical_device, &swapchain_create_info,
+ context.allocator, &out_swapchain->handle));
+ TRACE("Vulkan Swapchain created");
+
+ // Retrieve Images
+ // out_swapchain->images =
+ // arena_alloc(&out_swapchain->swapchain_arena, image_count * sizeof(VkImage));
+ out_swapchain->images = malloc(image_count * sizeof(VkImage));
+ VK_CHECK(vkGetSwapchainImagesKHR(context.device->logical_device, out_swapchain->handle,
+ &image_count, out_swapchain->images));
+
+ // Create ImageViews
+ // TODO: Move this to a separate function
+ out_swapchain->image_views = malloc(image_count * sizeof(VkImageView));
+ // arena_alloc(&out_swapchain->swapchain_arena, image_count * sizeof(VkImageView));
+ for (u32 i = 0; i < image_count; i++) {
+ VkImageViewCreateInfo view_create_info = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+ view_create_info.image = out_swapchain->images[i];
+ view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ view_create_info.format = image_format.format;
+ view_create_info.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_create_info.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_create_info.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_create_info.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+ view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ view_create_info.subresourceRange.baseMipLevel = 0;
+ view_create_info.subresourceRange.levelCount = 1;
+ view_create_info.subresourceRange.baseArrayLayer = 0;
+ view_create_info.subresourceRange.layerCount = 1;
+ vkCreateImageView(context.device->logical_device, &view_create_info, context.allocator,
+ &out_swapchain->image_views[i]);
+ }
+
+ return true;
+}
+
+void gpu_swapchain_destroy(gpu_swapchain* swapchain) {
+ // Destroy Framebuffers
+ DEBUG("Image count %d", swapchain->image_count);
+ for (u32 i = 0; i < swapchain->image_count; i++) {
+ DEBUG("Framebuffer handle %d", context.swapchain_framebuffers[i]);
+ vkDestroyFramebuffer(context.device->logical_device, context.swapchain_framebuffers[i],
+ context.allocator);
+ }
+ for (u32 i = 0; i < swapchain->image_count; i++) {
+ vkDestroyImageView(context.device->logical_device, swapchain->image_views[i],
+ context.allocator);
+ }
+ arena_free_all(&swapchain->swapchain_arena);
+ vkDestroySwapchainKHR(context.device->logical_device, swapchain->handle, context.allocator);
+ TRACE("Vulkan Swapchain destroyed");
+}
+
+static void recreate_swapchain(gpu_swapchain* swapchain) {
+ int width = 0, height = 0;
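+  // a minimised window reports a 0x0 framebuffer; wait until it is restored,
+  // since a zero-extent swapchain cannot be created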
+ glfwGetFramebufferSize(context.window, &width, &height);
+ while (width == 0 || height == 0) {
+ glfwGetFramebufferSize(context.window, &width, &height);
+ glfwWaitEvents();
+ }
+ DEBUG("Recreating swapchain...");
+ vkDeviceWaitIdle(context.device->logical_device);
+
+ gpu_swapchain_destroy(swapchain);
+ gpu_swapchain_create(swapchain);
+ create_swapchain_framebuffers();
+}
+
+VkFormat format_from_vertex_attr(vertex_attrib_type attr) {
+ switch (attr) {
+ case ATTR_F32:
+ return VK_FORMAT_R32_SFLOAT;
+ case ATTR_U32:
+ return VK_FORMAT_R32_UINT;
+ case ATTR_I32:
+ return VK_FORMAT_R32_SINT;
+ case ATTR_F32x2:
+ return VK_FORMAT_R32G32_SFLOAT;
+ case ATTR_U32x2:
+ return VK_FORMAT_R32G32_UINT;
+ case ATTR_I32x2:
+      return VK_FORMAT_R32G32_SINT;
+ case ATTR_F32x3:
+ return VK_FORMAT_R32G32B32_SFLOAT;
+ case ATTR_U32x3:
+ return VK_FORMAT_R32G32B32_UINT;
+ case ATTR_I32x3:
+ return VK_FORMAT_R32G32B32_SINT;
+ case ATTR_F32x4:
+ return VK_FORMAT_R32G32B32A32_SFLOAT;
+ case ATTR_U32x4:
+ return VK_FORMAT_R32G32B32A32_UINT;
+ case ATTR_I32x4:
+ return VK_FORMAT_R32G32B32A32_SINT;
+ }
+}
+
+gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) {
+ TRACE("GPU Graphics Pipeline creation");
+ // Allocate
+ gpu_pipeline_layout* layout =
+ pipeline_layout_pool_alloc(&context.gpu_pools.pipeline_layouts, NULL);
+ gpu_pipeline* pipeline = pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL);
+
+ // Shaders
+ printf("Vertex shader: %s\n", description.vs.filepath.buf);
+ printf("Fragment shader: %s\n", description.fs.filepath.buf);
+ VkShaderModule vertex_shader = create_shader_module(description.vs.code);
+ VkShaderModule fragment_shader = create_shader_module(description.fs.code);
+
+ // Vertex
+ VkPipelineShaderStageCreateInfo vert_shader_stage_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
+ };
+ vert_shader_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT;
+ vert_shader_stage_info.module = vertex_shader;
+ vert_shader_stage_info.pName = "main";
+ // Fragment
+ VkPipelineShaderStageCreateInfo frag_shader_stage_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
+ };
+ frag_shader_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
+ frag_shader_stage_info.module = fragment_shader;
+ frag_shader_stage_info.pName = "main";
+
+ VkPipelineShaderStageCreateInfo shader_stages[2] = { vert_shader_stage_info,
+ frag_shader_stage_info };
+
+ // Attributes
+ u32 attr_count = description.vertex_desc.attributes_count;
+ printf("N attributes %d\n", attr_count);
+ VkVertexInputAttributeDescription attribute_descs[attr_count];
+ memset(attribute_descs, 0, attr_count * sizeof(VkVertexInputAttributeDescription));
+ u32 offset = 0;
+ for (u32 i = 0; i < description.vertex_desc.attributes_count; i++) {
+ attribute_descs[i].binding = 0;
+ attribute_descs[i].location = i;
+ attribute_descs[i].format = format_from_vertex_attr(description.vertex_desc.attributes[i]);
+ attribute_descs[i].offset = offset;
+ size_t this_offset = vertex_attrib_size(description.vertex_desc.attributes[i]);
+ printf("offset total %d this attr %ld\n", offset, this_offset);
+ printf("sizeof vertex %ld\n", sizeof(vertex));
+ offset += this_offset;
+ }
+
+ // Vertex input
+  // TODO: Generate this from description now
+ VkVertexInputBindingDescription binding_desc;
+ binding_desc.binding = 0;
+ binding_desc.stride = description.vertex_desc.use_full_vertex_size
+ ? sizeof(vertex)
+ : description.vertex_desc.stride;
+ binding_desc.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+
+ VkPipelineVertexInputStateCreateInfo vertex_input_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
+ };
+ vertex_input_info.vertexBindingDescriptionCount = 1;
+ vertex_input_info.pVertexBindingDescriptions = &binding_desc;
+ vertex_input_info.vertexAttributeDescriptionCount =
+ attr_count; // description.vertex_desc.attributes_count;
+ vertex_input_info.pVertexAttributeDescriptions = attribute_descs;
+
+ // Input Assembly
+ VkPipelineInputAssemblyStateCreateInfo input_assembly = {
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO
+ };
+ input_assembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+ input_assembly.primitiveRestartEnable = VK_FALSE;
+
+ // Viewport
+ VkViewport viewport = { .x = 0,
+ .y = 0,
+ .width = (f32)context.swapchain->extent.width,
+ .height = (f32)context.swapchain->extent.height,
+ .minDepth = 0.0,
+ .maxDepth = 1.0 };
+ VkRect2D scissor = { .offset = { .x = 0, .y = 0 }, .extent = context.swapchain->extent };
+ VkPipelineViewportStateCreateInfo viewport_state = {
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO
+ };
+ viewport_state.viewportCount = 1;
+ // viewport_state.pViewports = &viewport;
+ viewport_state.scissorCount = 1;
+ // viewport_state.pScissors = &scissor;
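+  // pViewports/pScissors stay NULL because viewport and scissor are declared
+  // dynamic state below and set at command-record time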
+
+ // Rasterizer
+ VkPipelineRasterizationStateCreateInfo rasterizer_create_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO
+ };
+ rasterizer_create_info.depthClampEnable = VK_FALSE;
+ rasterizer_create_info.rasterizerDiscardEnable = VK_FALSE;
+ rasterizer_create_info.polygonMode =
+ description.wireframe ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL;
+ rasterizer_create_info.lineWidth = 1.0f;
+ rasterizer_create_info.cullMode = VK_CULL_MODE_BACK_BIT;
+ rasterizer_create_info.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ /* rasterizer_create_info.frontFace = VK_FRONT_FACE_CLOCKWISE; */
+ rasterizer_create_info.depthBiasEnable = VK_FALSE;
+ rasterizer_create_info.depthBiasConstantFactor = 0.0;
+ rasterizer_create_info.depthBiasClamp = 0.0;
+ rasterizer_create_info.depthBiasSlopeFactor = 0.0;
+
+ // Multisampling
+ VkPipelineMultisampleStateCreateInfo ms_create_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO
+ };
+ ms_create_info.sampleShadingEnable = VK_FALSE;
+ ms_create_info.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+ ms_create_info.minSampleShading = 1.0;
+ ms_create_info.pSampleMask = 0;
+ ms_create_info.alphaToCoverageEnable = VK_FALSE;
+ ms_create_info.alphaToOneEnable = VK_FALSE;
+
+ // TODO: Depth and stencil testing
+ // VkPipelineDepthStencilStateCreateInfo depth_stencil = {
+ // VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO
+ // };
+ // depth_stencil.depthTestEnable = description.depth_test ? VK_TRUE : VK_FALSE;
+ // depth_stencil.depthWriteEnable = description.depth_test ? VK_TRUE : VK_FALSE;
+ // depth_stencil.depthCompareOp = VK_COMPARE_OP_LESS;
+ // depth_stencil.depthBoundsTestEnable = VK_FALSE;
+ // depth_stencil.stencilTestEnable = VK_FALSE;
+ // depth_stencil.pNext = 0;
+
+ // Blending
+ VkPipelineColorBlendAttachmentState color_blend_attachment_state;
+ color_blend_attachment_state.blendEnable = VK_FALSE;
+ color_blend_attachment_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
+ color_blend_attachment_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ color_blend_attachment_state.colorBlendOp = VK_BLEND_OP_ADD;
+ color_blend_attachment_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
+ color_blend_attachment_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ color_blend_attachment_state.alphaBlendOp = VK_BLEND_OP_ADD;
+ color_blend_attachment_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
+ VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+
+ VkPipelineColorBlendStateCreateInfo color_blend = {
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO
+ };
+ color_blend.logicOpEnable = VK_FALSE;
+ color_blend.logicOp = VK_LOGIC_OP_COPY;
+ color_blend.attachmentCount = 1;
+ color_blend.pAttachments = &color_blend_attachment_state;
+
+// Dynamic state
+#define DYNAMIC_STATE_COUNT 2
+ VkDynamicState dynamic_states[DYNAMIC_STATE_COUNT] = {
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR,
+ };
+
+ VkPipelineDynamicStateCreateInfo dynamic_state = {
+ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO
+ };
+ dynamic_state.dynamicStateCount = DYNAMIC_STATE_COUNT;
+ dynamic_state.pDynamicStates = dynamic_states;
+
+ // Descriptor Set layouts
+
+ VkDescriptorSetLayout* desc_set_layouts =
+ malloc(description.data_layouts_count * sizeof(VkDescriptorSetLayout));
+ pipeline->desc_set_layouts = desc_set_layouts;
+ pipeline->desc_set_layouts_count = description.data_layouts_count;
+ if (description.data_layouts_count > 0) {
+ pipeline->uniform_pointers =
+ malloc(description.data_layouts_count * sizeof(desc_set_uniform_buffer));
+ } else {
+ pipeline->uniform_pointers = NULL;
+ }
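+  // uniform_pointers holds one entry per descriptor set layout; each entry
+  // caches the per-frame uniform buffers that encode_bind_shader_data writes
+  // into every frame.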
+
+ // assert(description.data_layouts_count == 1);
+ printf("data layouts %d\n", description.data_layouts_count);
+ for (u32 layout_i = 0; layout_i < description.data_layouts_count; layout_i++) {
+ shader_data_layout sdl = description.data_layouts[layout_i].shader_data_get_layout(NULL);
+ TRACE("Got shader data layout %d's bindings! . found %d", layout_i, sdl.bindings_count);
+
+ VkDescriptorSetLayoutBinding desc_set_bindings[sdl.bindings_count];
+
+ // Bindings
+ assert(sdl.bindings_count == 2);
+ for (u32 binding_j = 0; binding_j < sdl.bindings_count; binding_j++) {
+ desc_set_bindings[binding_j].binding = binding_j;
+ desc_set_bindings[binding_j].descriptorCount = 1;
+ switch (sdl.bindings[binding_j].type) {
+ case SHADER_BINDING_BUFFER:
+ case SHADER_BINDING_BYTES:
+ desc_set_bindings[binding_j].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ desc_set_bindings[binding_j].stageFlags =
+ VK_SHADER_STAGE_VERTEX_BIT; // FIXME: dont hardcode
+
+ u64 buffer_size = sdl.bindings[binding_j].data.bytes.size;
+ VkDeviceSize uniform_buf_size = buffer_size;
+ // TODO: Create backing buffer
+
+ VkBuffer buffers[MAX_FRAMES_IN_FLIGHT];
+ VkDeviceMemory uniform_buf_memorys[MAX_FRAMES_IN_FLIGHT];
+ void* uniform_buf_mem_mappings[MAX_FRAMES_IN_FLIGHT];
+ for (size_t frame_i = 0; frame_i < MAX_FRAMES_IN_FLIGHT; frame_i++) {
+ buffer_handle uniform_buf_handle =
+ gpu_buffer_create(buffer_size, CEL_BUFFER_UNIFORM, CEL_BUFFER_FLAG_CPU, NULL);
+
+ gpu_buffer* created_gpu_buffer =
+ BUFFER_GET(uniform_buf_handle); // context.buffers[uniform_buf_handle.raw];
+ buffers[frame_i] = created_gpu_buffer->handle;
+ uniform_buf_memorys[frame_i] = created_gpu_buffer->memory;
+ vkMapMemory(context.device->logical_device, uniform_buf_memorys[frame_i], 0,
+ uniform_buf_size, 0, &uniform_buf_mem_mappings[frame_i]);
+            // now we have a pointer in uniform_buf_mem_mappings that we can write to
+ }
+
+ desc_set_uniform_buffer uniform_data;
+ memcpy(&uniform_data.buffers, &buffers, sizeof(buffers));
+ memcpy(&uniform_data.uniform_buf_memorys, &uniform_buf_memorys,
+ sizeof(uniform_buf_memorys));
+ memcpy(&uniform_data.uniform_buf_mem_mappings, &uniform_buf_mem_mappings,
+ sizeof(uniform_buf_mem_mappings));
+ uniform_data.size = buffer_size;
+
+          // index by descriptor set layout (not by binding): uniform_pointers
+          // has one entry per data layout and is looked up by group index in
+          // encode_bind_shader_data
+          pipeline->uniform_pointers[layout_i] = uniform_data;
+
+ break;
+ case SHADER_BINDING_TEXTURE:
+ desc_set_bindings[binding_j].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ desc_set_bindings[binding_j].stageFlags =
+ VK_SHADER_STAGE_FRAGMENT_BIT; // FIXME: dont hardcode
+ desc_set_bindings[binding_j].pImmutableSamplers = NULL;
+
+ break;
+ default:
+ ERROR_EXIT("Unimplemented binding type!! in backend_vulkan");
+ }
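+      // NOTE: the visibility switch below overrides the stage flags hardcoded
+      // in the type switch above.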
+ switch (sdl.bindings[binding_j].vis) {
+ case VISIBILITY_VERTEX:
+ desc_set_bindings[binding_j].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+ break;
+ case VISIBILITY_FRAGMENT:
+ desc_set_bindings[binding_j].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+ break;
+ case VISIBILITY_COMPUTE:
+ WARN("Compute is not implemented yet");
+ break;
+ }
+ }
+
+ VkDescriptorSetLayoutCreateInfo desc_set_layout_info = {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
+ };
+ desc_set_layout_info.bindingCount = sdl.bindings_count;
+ desc_set_layout_info.pBindings = desc_set_bindings;
+
+ VK_CHECK(vkCreateDescriptorSetLayout(context.device->logical_device, &desc_set_layout_info,
+ context.allocator, &desc_set_layouts[layout_i]));
+ }
+ printf("Descriptor set layouts\n");
+
+ // Layout
+ VkPipelineLayoutCreateInfo pipeline_layout_create_info = {
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
+ };
+ pipeline_layout_create_info.setLayoutCount = description.data_layouts_count;
+ pipeline_layout_create_info.pSetLayouts = desc_set_layouts;
+ pipeline_layout_create_info.pushConstantRangeCount = 0;
+ pipeline_layout_create_info.pPushConstantRanges = NULL;
+ VK_CHECK(vkCreatePipelineLayout(context.device->logical_device, &pipeline_layout_create_info,
+ context.allocator, &layout->handle));
+ pipeline->layout_handle = layout->handle; // keep a copy of the layout on the pipeline object
+
+ VkGraphicsPipelineCreateInfo pipeline_create_info = {
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
+ };
+
+ pipeline_create_info.stageCount = 2;
+ pipeline_create_info.pStages = shader_stages;
+ pipeline_create_info.pVertexInputState = &vertex_input_info;
+ pipeline_create_info.pInputAssemblyState = &input_assembly;
+
+ pipeline_create_info.pViewportState = &viewport_state;
+ pipeline_create_info.pRasterizationState = &rasterizer_create_info;
+ pipeline_create_info.pMultisampleState = &ms_create_info;
+ pipeline_create_info.pDepthStencilState = NULL; // &depth_stencil;
+ pipeline_create_info.pColorBlendState = &color_blend;
+ pipeline_create_info.pDynamicState = &dynamic_state;
+ pipeline_create_info.pTessellationState = 0;
+
+ pipeline_create_info.layout = layout->handle;
+
+ pipeline_create_info.renderPass = description.renderpass->handle;
+ pipeline_create_info.subpass = 0;
+ pipeline_create_info.basePipelineHandle = VK_NULL_HANDLE;
+ pipeline_create_info.basePipelineIndex = -1;
+
+ printf("About to create graphics pipeline\n");
+
+ VkResult result =
+ vkCreateGraphicsPipelines(context.device->logical_device, VK_NULL_HANDLE, 1,
+ &pipeline_create_info, context.allocator, &pipeline->handle);
+ if (result != VK_SUCCESS) {
+ FATAL("graphics pipeline creation failed. its fked mate");
+ ERROR_EXIT("Doomed");
+ }
+ TRACE("Vulkan Graphics pipeline created");
+
+ // once the pipeline has been created we can destroy these
+ vkDestroyShaderModule(context.device->logical_device, vertex_shader, context.allocator);
+ vkDestroyShaderModule(context.device->logical_device, fragment_shader, context.allocator);
+
+ // Framebuffers
+ create_swapchain_framebuffers();
+ TRACE("Swapchain Framebuffers created");
+
+ for (u32 frame_i = 0; frame_i < MAX_FRAMES_IN_FLIGHT; frame_i++) {
+ context.main_cmd_bufs[frame_i] = gpu_cmd_encoder_create();
+ }
+ TRACE("main Command Buffer created");
+
+ TRACE("Graphics pipeline created");
+ return pipeline;
+}
+
+void gpu_pipeline_destroy(gpu_pipeline* pipeline) {
+ vkDestroyPipeline(context.device->logical_device, pipeline->handle, context.allocator);
+ vkDestroyPipelineLayout(context.device->logical_device, pipeline->layout_handle,
+ context.allocator);
+}
+
+gpu_cmd_encoder* gpu_get_default_cmd_encoder() {
+ return &context.main_cmd_bufs[context.current_frame];
+}
+
+gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {
+ gpu_renderpass* renderpass = renderpass_pool_alloc(&context.gpu_pools.renderpasses, NULL);
+
+ // attachments
+ u32 attachment_desc_count = 2;
+ VkAttachmentDescription attachment_descriptions[2];
+
+ // Colour attachment
+ VkAttachmentDescription color_attachment;
+ color_attachment.format = context.swapchain->image_format.format;
+ color_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+ color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ color_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ color_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ color_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ color_attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ color_attachment.flags = 0;
+
+ attachment_descriptions[0] = color_attachment;
+
+ VkAttachmentReference color_attachment_reference;
+ color_attachment_reference.attachment = 0;
+ color_attachment_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ // Depth attachment
+ u32x2 ext = { .x = context.swapchain_support.capabilities.currentExtent.width,
+ .y = context.swapchain_support.capabilities.currentExtent.height };
+ texture_desc depth_desc = { .extents = ext,
+ .format = CEL_TEXTURE_FORMAT_DEPTH_DEFAULT,
+ .tex_type = CEL_TEXTURE_TYPE_2D };
+ texture_handle depth_texture_handle = gpu_texture_create(depth_desc, true, NULL);
+ gpu_texture* depth = TEXTURE_GET(depth_texture_handle);
+
+ VkAttachmentDescription depth_attachment;
+  depth_attachment.format =
+      VK_FORMAT_D32_SFLOAT;  // TODO: query and use context->device.depth_format
+ depth_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
+ depth_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ depth_attachment.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ depth_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ depth_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ depth_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ depth_attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ depth_attachment.flags = 0;
+
+ attachment_descriptions[1] = depth_attachment;
+
+ VkAttachmentReference depth_attachment_reference;
+ depth_attachment_reference.attachment = 1;
+ depth_attachment_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+
+ // main subpass
+ VkSubpassDescription subpass = { 0 };
+ subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass.colorAttachmentCount = 1;
+ subpass.pColorAttachments = &color_attachment_reference;
+
+ // sets everything up
+ // renderpass dependencies
+ VkSubpassDependency dependency;
+ dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
+ dependency.dstSubpass = 0;
+ dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ dependency.srcAccessMask = 0;
+ dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ dependency.dstAccessMask =
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ dependency.dependencyFlags = 0;
+
+ // Finally, create the RenderPass
+ VkRenderPassCreateInfo render_pass_create_info = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO };
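+  // NOTE: only the colour attachment is wired into the render pass for now;
+  // the depth attachment prepared above is not yet referenced by the subpass
+  // or by the swapchain framebuffers (which attach a single image view).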
+ render_pass_create_info.attachmentCount = 1;
+ render_pass_create_info.pAttachments = &color_attachment;
+ render_pass_create_info.subpassCount = 1;
+ render_pass_create_info.pSubpasses = &subpass;
+ render_pass_create_info.dependencyCount = 1;
+ render_pass_create_info.pDependencies = &dependency;
+ render_pass_create_info.flags = 0;
+ render_pass_create_info.pNext = 0;
+
+ VK_CHECK(vkCreateRenderPass(context.device->logical_device, &render_pass_create_info,
+ context.allocator, &renderpass->handle));
+
+ // HACK
+ context.main_renderpass = renderpass->handle;
+
+ return renderpass;
+}
+
+gpu_cmd_encoder gpu_cmd_encoder_create() {
+ // gpu_cmd_encoder* encoder = malloc(sizeof(gpu_cmd_encoder)); // TODO: fix leaking mem
+ gpu_cmd_encoder encoder = { 0 };
+
+ VkCommandBufferAllocateInfo allocate_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO };
+ allocate_info.commandPool = context.device->pool;
+ allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ allocate_info.commandBufferCount = 1;
+ allocate_info.pNext = NULL;
+
+  VK_CHECK(vkAllocateCommandBuffers(context.device->logical_device, &allocate_info,
+                                    &encoder.cmd_buffer));
+
+ VkDescriptorPoolSize pool_sizes[2];
+ // Uniforms pool
+ pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ pool_sizes[0].descriptorCount = MAX_FRAMES_IN_FLIGHT * MAX_DESCRIPTOR_SETS;
+ // Samplers pool
+ pool_sizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ pool_sizes[1].descriptorCount = MAX_FRAMES_IN_FLIGHT * MAX_DESCRIPTOR_SETS;
+
+ VkDescriptorPoolCreateInfo pool_info = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO };
+ pool_info.poolSizeCount = 2;
+ pool_info.pPoolSizes = pool_sizes;
+ pool_info.maxSets = 100;
+
+ VK_CHECK(vkCreateDescriptorPool(context.device->logical_device, &pool_info, context.allocator,
+ &encoder.descriptor_pool));
+
+ return encoder;
+}
+void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder) {
+ vkFreeCommandBuffers(context.device->logical_device, context.device->pool, 1,
+ &encoder->cmd_buffer);
+}
+
+void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder) {
+ VK_CHECK(vkResetDescriptorPool(context.device->logical_device, encoder.descriptor_pool, 0));
+
+ VkCommandBufferBeginInfo begin_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+ VK_CHECK(vkBeginCommandBuffer(encoder.cmd_buffer, &begin_info));
+}
+
+void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass) {
+ VkRenderPassBeginInfo begin_info = { VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO };
+ begin_info.renderPass = renderpass->handle;
+ /* printf("Current img: %d Current frame %d\n", context.current_img_index, context.current_frame);
+ */
+ begin_info.framebuffer = context.swapchain_framebuffers[context.current_img_index];
+ begin_info.renderArea.offset = (VkOffset2D){ 0, 0 };
+ begin_info.renderArea.extent = context.swapchain->extent;
+
+ // VkClearValue clear_values[2];
+ VkClearValue clear_color = { { { 0.02f, 0.02f, 0.02f, 1.0f } } };
+ // clear_values[1].depthStencil.depth = renderpass->depth;
+ // clear_values[1].depthStencil.stencil = renderpass->stencil;
+
+ begin_info.clearValueCount = 1;
+ begin_info.pClearValues = &clear_color;
+
+ vkCmdBeginRenderPass(encoder->cmd_buffer, &begin_info, VK_SUBPASS_CONTENTS_INLINE);
+ // command_buffer->state = COMMAND_BUFFER_STATE_IN_RENDER_PASS;
+}
+
+void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder) {
+ vkCmdEndRenderPass(encoder->cmd_buffer);
+}
+
+gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder) {
+ vkEndCommandBuffer(encoder->cmd_buffer);
+
+ // TEMP: submit
+ return (gpu_cmd_buffer){ .cmd_buffer = encoder->cmd_buffer };
+}
+
+// --- Binding
+void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline) {
+ vkCmdBindPipeline(encoder->cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->handle);
+ encoder->pipeline = pipeline;
+}
+
+void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data) {
+ arena tmp = arena_create(malloc(1024), 1024);
+
+ assert(data->data != NULL);
+
+ // Update the local buffer
+ desc_set_uniform_buffer ubo = encoder->pipeline->uniform_pointers[group];
+ memcpy(ubo.uniform_buf_mem_mappings[context.current_frame], data->data, ubo.size);
+
+ VkDescriptorSetAllocateInfo alloc_info = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO };
+ alloc_info.descriptorPool = encoder->descriptor_pool;
+ alloc_info.descriptorSetCount = 1;
+ alloc_info.pSetLayouts = &encoder->pipeline->desc_set_layouts[group];
+
+ shader_data_layout sdl = data->shader_data_get_layout(data->data);
+ size_t binding_count = sdl.bindings_count;
+ assert(binding_count == 2);
+
+  VkDescriptorSet sets[1];
+ VK_CHECK(vkAllocateDescriptorSets(context.device->logical_device, &alloc_info, sets));
+ // FIXME: hardcoded
+ VkDescriptorSet_darray_push(context.free_set_queue, sets[0]);
+ /* VkDescriptorSet_darray_push(context.free_set_queue, sets[1]); */
+
+ VkWriteDescriptorSet write_sets[binding_count];
+ memset(&write_sets, 0, binding_count * sizeof(VkWriteDescriptorSet));
+
+ for (u32 i = 0; i < sdl.bindings_count; i++) {
+ shader_binding binding = sdl.bindings[i];
+
+ if (binding.type == SHADER_BINDING_BUFFER || binding.type == SHADER_BINDING_BYTES) {
+ VkDescriptorBufferInfo* buffer_info = arena_alloc(&tmp, sizeof(VkDescriptorBufferInfo));
+ buffer_info->buffer = ubo.buffers[context.current_frame];
+ buffer_info->offset = 0;
+ buffer_info->range = binding.data.bytes.size;
+
+ write_sets[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write_sets[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ write_sets[i].descriptorCount = 1;
+ write_sets[i].dstSet = sets[0];
+ write_sets[i].dstBinding = i;
+ write_sets[i].dstArrayElement = 0;
+ write_sets[i].pBufferInfo = buffer_info;
+ } else if (binding.type == SHADER_BINDING_TEXTURE) {
+ gpu_texture* texture = TEXTURE_GET(binding.data.texture.handle);
+ VkDescriptorImageInfo* image_info = arena_alloc(&tmp, sizeof(VkDescriptorImageInfo));
+ image_info->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+ image_info->imageView = texture->view;
+ image_info->sampler = texture->sampler;
+
+ write_sets[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ write_sets[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ write_sets[i].descriptorCount = 1;
+ write_sets[i].dstSet = sets[0];
+ write_sets[i].dstBinding = i;
+ write_sets[i].dstArrayElement = 0;
+ write_sets[i].pImageInfo = image_info;
+ } else {
+ WARN("Unknown binding");
+ }
+ }
+
+ // Update
+ vkUpdateDescriptorSets(context.device->logical_device, binding_count, write_sets, 0, NULL);
+
+ // Bind
+ vkCmdBindDescriptorSets(encoder->cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ encoder->pipeline->layout_handle, 0, 1, sets, 0, NULL);
+
+ arena_free_storage(&tmp);
+}
+
+void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* buffer = BUFFER_GET(buf); // context.buffers[buf.raw];
+ VkBuffer vbs[] = { buffer->handle };
+ VkDeviceSize offsets[] = { 0 };
+ vkCmdBindVertexBuffers(encoder->cmd_buffer, 0, 1, vbs, offsets);
+}
+
+void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {
+ gpu_buffer* buffer = BUFFER_GET(buf); // context.buffers[buf.raw];
+ vkCmdBindIndexBuffer(encoder->cmd_buffer, buffer->handle, 0, VK_INDEX_TYPE_UINT32);
+}
+
+// TEMP
+void encode_set_default_settings(gpu_cmd_encoder* encoder) {
+ VkViewport viewport = { 0 };
+ viewport.x = 0.0f;
+ viewport.y = 0.0f;
+ viewport.width = context.swapchain->extent.width;
+ viewport.height = context.swapchain->extent.height;
+ viewport.minDepth = 0.0f;
+ viewport.maxDepth = 1.0f;
+ vkCmdSetViewport(encoder->cmd_buffer, 0, 1, &viewport);
+
+ VkRect2D scissor = { 0 };
+ scissor.offset = (VkOffset2D){ 0, 0 };
+ scissor.extent = context.swapchain->extent;
+ vkCmdSetScissor(encoder->cmd_buffer, 0, 1, &scissor);
+}
+
+// --- Drawing
+
+bool gpu_backend_begin_frame() {
+ u32 current_frame = context.current_frame;
+ vkWaitForFences(context.device->logical_device, 1, &context.in_flight_fences[current_frame],
+ VK_TRUE, UINT64_MAX);
+
+ u32 image_index;
+ VkResult result = vkAcquireNextImageKHR(
+ context.device->logical_device, context.swapchain->handle, UINT64_MAX,
+ context.image_available_semaphores[current_frame], VK_NULL_HANDLE, &image_index);
+ if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR || context.is_resizing) {
+ ERROR("Acquire next image failure. recreate swapchain");
+ context.is_resizing = false;
+ recreate_swapchain(context.swapchain);
+ return false;
+ } else if (result != VK_SUCCESS) {
+ ERROR_EXIT("failed to acquire swapchain image");
+ }
+
+ vkResetFences(context.device->logical_device, 1, &context.in_flight_fences[current_frame]);
+
+ context.current_img_index = image_index;
+ VK_CHECK(vkResetCommandBuffer(context.main_cmd_bufs[current_frame].cmd_buffer, 0));
+ return true;
+}
+
+void gpu_temp_draw(size_t n_indices) {
+ gpu_cmd_encoder* encoder = gpu_get_default_cmd_encoder(); // &context.main_cmd_buf;
+ /* vkCmdDraw(encoder->cmd_buffer, n_verts, 1, 0, 0); */
+ vkCmdDrawIndexed(encoder->cmd_buffer, n_indices, 1, 0, 0, 0);
+}
+
+void gpu_backend_end_frame() {
+ VkPresentInfoKHR present_info = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
+ present_info.waitSemaphoreCount = 1;
+ present_info.pWaitSemaphores = &context.render_finished_semaphores[context.current_frame];
+
+ VkSwapchainKHR swapchains[] = { context.swapchain->handle };
+ present_info.swapchainCount = 1;
+ present_info.pSwapchains = swapchains;
+ present_info.pImageIndices = &context.current_img_index;
+
+ VkResult result = vkQueuePresentKHR(context.device->present_queue, &present_info);
+ if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) {
+ ERROR("Queue present error. recreate swapchain");
+ recreate_swapchain(context.swapchain);
+ return;
+ } else if (result != VK_SUCCESS) {
+ ERROR_EXIT("failed to present swapchain image");
+ }
+ context.current_frame = (context.current_frame + 1) % MAX_FRAMES_IN_FLIGHT;
+
+ /* vkDeviceWaitIdle(context.device->logical_device); */
+}
+
+// TODO: Move into better order in file
+void gpu_queue_submit(gpu_cmd_buffer* buffer) {
+ VkSubmitInfo submit_info = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+
+ // Specify semaphore to wait on
+ VkSemaphore wait_semaphores[] = { context.image_available_semaphores[context.current_frame] };
+ VkPipelineStageFlags wait_stages[] = { VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT };
+
+ submit_info.waitSemaphoreCount = 1;
+ submit_info.pWaitSemaphores = wait_semaphores;
+ submit_info.pWaitDstStageMask = wait_stages;
+
+ // Specify semaphore to signal when finished executing buffer
+ VkSemaphore signal_semaphores[] = { context.render_finished_semaphores[context.current_frame] };
+ submit_info.signalSemaphoreCount = 1;
+ submit_info.pSignalSemaphores = signal_semaphores;
+
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &buffer->cmd_buffer;
+
+ VK_CHECK(vkQueueSubmit(context.device->graphics_queue, 1, &submit_info,
+ context.in_flight_fences[context.current_frame]));
+}
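+
+/* Typical frame flow (illustrative sketch using the functions above):
+ *   if (!gpu_backend_begin_frame()) return;  // may recreate the swapchain
+ *   gpu_cmd_encoder* enc = gpu_get_default_cmd_encoder();
+ *   gpu_cmd_encoder_begin(*enc);
+ *   gpu_cmd_encoder_begin_render(enc, renderpass);
+ *   // ... bind pipeline, shader data, vertex/index buffers, draw ...
+ *   gpu_cmd_encoder_end_render(enc);
+ *   gpu_cmd_buffer cmd = gpu_cmd_encoder_finish(enc);
+ *   gpu_queue_submit(&cmd);
+ *   gpu_backend_end_frame();
+ */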
+
+inline void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) {
+ vkCmdDrawIndexed(encoder->cmd_buffer, index_count, 1, 0, 0, 0);
+}
+
+bool select_physical_device(gpu_device* out_device) {
+ u32 physical_device_count = 0;
+ VK_CHECK(vkEnumeratePhysicalDevices(context.instance, &physical_device_count, 0));
+ if (physical_device_count == 0) {
+ FATAL("No devices that support vulkan were found");
+ return false;
+ }
+ TRACE("Number of devices found %d", physical_device_count);
+
+ VkPhysicalDevice* physical_devices =
+ arena_alloc(&context.temp_arena, physical_device_count * sizeof(VkPhysicalDevice));
+ VK_CHECK(vkEnumeratePhysicalDevices(context.instance, &physical_device_count, physical_devices));
+
+ bool found = false;
+ for (u32 device_i = 0; device_i < physical_device_count; device_i++) {
+ if (is_physical_device_suitable(physical_devices[device_i])) {
+ out_device->physical_device = physical_devices[device_i];
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ FATAL("Couldn't find a suitable physical device");
+ return false;
+ }
+
+ vkGetPhysicalDeviceProperties(out_device->physical_device, &out_device->properties);
+ vkGetPhysicalDeviceFeatures(out_device->physical_device, &out_device->features);
+ vkGetPhysicalDeviceMemoryProperties(out_device->physical_device, &out_device->memory);
+
+ return true;
+}
+
+bool is_physical_device_suitable(VkPhysicalDevice device) {
+ VkPhysicalDeviceProperties properties;
+ vkGetPhysicalDeviceProperties(device, &properties);
+
+ VkPhysicalDeviceFeatures features;
+ vkGetPhysicalDeviceFeatures(device, &features);
+
+ VkPhysicalDeviceMemoryProperties memory;
+ vkGetPhysicalDeviceMemoryProperties(device, &memory);
+
+ // TODO: Check against these device properties
+
+ queue_family_indices indices = find_queue_families(device);
+
+ vulkan_device_query_swapchain_support(device, context.surface, &context.swapchain_support);
+
+ return indices.has_graphics && indices.has_present && context.swapchain_support.mode_count > 0 &&
+ context.swapchain_support.format_count > 0;
+}
+
+queue_family_indices find_queue_families(VkPhysicalDevice device) {
+ queue_family_indices indices = { 0 };
+
+ u32 queue_family_count = 0;
+ vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, 0);
+
+ VkQueueFamilyProperties* queue_families =
+ arena_alloc(&context.temp_arena, queue_family_count * sizeof(VkQueueFamilyProperties));
+ vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, queue_families);
+
+ for (u32 q_fam_i = 0; q_fam_i < queue_family_count; q_fam_i++) {
+ // Graphics queue
+ if (queue_families[q_fam_i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ indices.graphics_family_index = q_fam_i;
+ indices.has_graphics = true;
+ }
+
+ VkBool32 present_support = false;
+ vkGetPhysicalDeviceSurfaceSupportKHR(device, q_fam_i, context.surface, &present_support);
+ if (present_support && !indices.has_present) {
+ indices.present_family_index = q_fam_i;
+ indices.has_present = true;
+ }
+ }
+
+ return indices;
+}
+
+bool create_logical_device(gpu_device* out_device) {
+ queue_family_indices indices = find_queue_families(out_device->physical_device);
+ INFO(" %s | %s | %s | %s | %s", bool_str(indices.has_graphics), bool_str(indices.has_present),
+ bool_str(indices.has_compute), bool_str(indices.has_transfer),
+ out_device->properties.deviceName);
+ TRACE("Graphics Family queue index: %d", indices.graphics_family_index);
+ TRACE("Present Family queue index: %d", indices.present_family_index);
+ TRACE("Compute Family queue index: %d", indices.compute_family_index);
+ TRACE("Transfer Family queue index: %d", indices.transfer_family_index);
+
+ // Queues
+ f32 prio_one = 1.0;
+ VkDeviceQueueCreateInfo queue_create_infos[1] = { 0 };
+ queue_create_infos[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ queue_create_infos[0].queueFamilyIndex = indices.graphics_family_index;
+ queue_create_infos[0].queueCount = 1;
+ queue_create_infos[0].pQueuePriorities = &prio_one;
+ queue_create_infos[0].flags = 0;
+ queue_create_infos[0].pNext = 0;
+
+ // queue_create_infos[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ // queue_create_infos[1].queueFamilyIndex = indices.present_family_index;
+ // queue_create_infos[1].queueCount = 1;
+ // queue_create_infos[1].pQueuePriorities = &prio_one;
+ // queue_create_infos[1].flags = 0;
+ // queue_create_infos[1].pNext = 0;
+
+ // Features
+ VkPhysicalDeviceFeatures device_features = { 0 };
+  device_features.samplerAnisotropy = VK_TRUE;  // request anisotropy
+
+ // Device itself
+ VkDeviceCreateInfo device_create_info = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
+ device_create_info.queueCreateInfoCount = 1;
+ device_create_info.pQueueCreateInfos = queue_create_infos;
+ device_create_info.pEnabledFeatures = &device_features;
+ device_create_info.enabledExtensionCount = 1;
+ const char* extension_names = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+ device_create_info.ppEnabledExtensionNames = &extension_names;
+
+ // deprecated
+ device_create_info.enabledLayerCount = 0;
+ device_create_info.ppEnabledLayerNames = 0;
+
+ VkResult result = vkCreateDevice(context.device->physical_device, &device_create_info,
+ context.allocator, &context.device->logical_device);
+ if (result != VK_SUCCESS) {
+ printf("error creating logical device with status %u\n", result);
+ ERROR_EXIT("Unable to create vulkan logical device. Exiting..");
+ }
+ TRACE("Logical device created");
+
+ context.device->queue_family_indicies = indices;
+
+ // Retrieve queue handles
+ vkGetDeviceQueue(context.device->logical_device, indices.graphics_family_index, 0,
+ &context.device->graphics_queue);
+ vkGetDeviceQueue(context.device->logical_device, indices.present_family_index, 0,
+ &context.device->present_queue);
+
+ return true;
+}
+
+VkShaderModule create_shader_module(str8 spirv) {
+ VkShaderModuleCreateInfo create_info = { VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO };
+ create_info.codeSize = spirv.len;
+ create_info.pCode = (uint32_t*)spirv.buf;
+
+ VkShaderModule shader_module;
+ VK_CHECK(vkCreateShaderModule(context.device->logical_device, &create_info, context.allocator,
+ &shader_module));
+
+ return shader_module;
+}
+
+void create_descriptor_pools() {}
+
+void create_swapchain_framebuffers() {
+ WARN("Recreating framebuffers...");
+ u32 image_count = context.swapchain->image_count;
+ context.swapchain_framebuffers =
+ arena_alloc(&context.swapchain->swapchain_arena, image_count * sizeof(VkFramebuffer));
+ for (u32 i = 0; i < image_count; i++) {
+ VkImageView attachments[1] = { context.swapchain->image_views[i] };
+
+ VkFramebufferCreateInfo framebuffer_create_info = { VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO };
+ framebuffer_create_info.attachmentCount = 1;
+ framebuffer_create_info.pAttachments = attachments;
+
+ framebuffer_create_info.renderPass =
+ context.main_renderpass; // TODO: description.renderpass->handle;
+ framebuffer_create_info.width = context.swapchain->extent.width;
+ framebuffer_create_info.height = context.swapchain->extent.height;
+ framebuffer_create_info.layers = 1;
+
+ VK_CHECK(vkCreateFramebuffer(context.device->logical_device, &framebuffer_create_info,
+ context.allocator, &context.swapchain_framebuffers[i]));
+ }
+}
+
+void create_sync_objects() {
+ VkSemaphoreCreateInfo semaphore_info = { VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO };
+ VkFenceCreateInfo fence_info = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
+ fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
+
+ for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) {
+    VK_CHECK(vkCreateSemaphore(context.device->logical_device, &semaphore_info, context.allocator,
+                               &context.image_available_semaphores[i]));
+    VK_CHECK(vkCreateSemaphore(context.device->logical_device, &semaphore_info, context.allocator,
+                               &context.render_finished_semaphores[i]));
+
+ VK_CHECK(vkCreateFence(context.device->logical_device, &fence_info, context.allocator,
+ &context.in_flight_fences[i]));
+ }
+}
+
+static i32 find_memory_index(u32 type_filter, u32 property_flags) {
+ VkPhysicalDeviceMemoryProperties memory_properties;
+ vkGetPhysicalDeviceMemoryProperties(context.device->physical_device, &memory_properties);
+
+ for (u32 i = 0; i < memory_properties.memoryTypeCount; ++i) {
+ // Check each memory type to see if its bit is set to 1.
+ if (type_filter & (1 << i) &&
+ (memory_properties.memoryTypes[i].propertyFlags & property_flags) == property_flags) {
+ return i;
+ }
+ }
+
+ WARN("Unable to find suitable memory type!");
+ return -1;
+}
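+
+// Example: a type_filter of 0b0110 permits memory types 1 and 2; with
+// property_flags = HOST_VISIBLE | HOST_COHERENT, the loop above returns the
+// lowest-indexed of those types whose propertyFlags contains both bits, or -1.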
+
+buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
+ const void* data) {
+ VkBufferCreateInfo buffer_info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+ buffer_info.size = size;
+ buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+ switch (buf_type) {
+ case CEL_BUFFER_DEFAULT:
+ buffer_info.usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+ break;
+ case CEL_BUFFER_VERTEX:
+ buffer_info.usage |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ break;
+ case CEL_BUFFER_INDEX:
+ buffer_info.usage |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ break;
+ case CEL_BUFFER_UNIFORM:
+ buffer_info.usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ break;
+ case CEL_BUFFER_COUNT:
+ WARN("Incorrect gpu_buffer_type provided. using default");
+ break;
+ }
+
+ buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+
+ // "allocating" the cpu-side buffer struct
+ /* gpu_buffer buffer; */
+ /* buffer.size = size; */
+ buffer_handle handle;
+ gpu_buffer* buffer = buffer_pool_alloc(&context.resource_pools->buffers, &handle);
+ buffer->size = size;
+
+ VK_CHECK(vkCreateBuffer(context.device->logical_device, &buffer_info, context.allocator,
+ &buffer->handle));
+
+ VkMemoryRequirements requirements;
+ vkGetBufferMemoryRequirements(context.device->logical_device, buffer->handle, &requirements);
+
+ // Just make them always need all of them for now
+ i32 memory_index =
+ find_memory_index(requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
+
+ // Allocate the actual VRAM
+ VkMemoryAllocateInfo allocate_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ allocate_info.allocationSize = requirements.size;
+ allocate_info.memoryTypeIndex = (u32)memory_index;
+
+ vkAllocateMemory(context.device->logical_device, &allocate_info, context.allocator,
+ &buffer->memory);
+ vkBindBufferMemory(context.device->logical_device, buffer->handle, buffer->memory, 0);
+
+ /* Now there are two options:
+ * 1. create CPU-accessible memory -> map memory -> memcpy -> unmap
+ * 2. use a staging buffer thats CPU-accessible and copy its contents to a
+ * GPU-only buffer
+ */
+
+ /* context.buffers[context.buffer_count] = buffer; */
+ /* context.buffer_count++; */
+
+ if (data) {
+ TRACE("Upload data as part of buffer creation");
+ if (flags & CEL_BUFFER_FLAG_CPU) {
+ // map memory -> copy data in -> unmap memory
+ buffer_upload_bytes(handle, (bytebuffer){ .buf = (u8*)data, .size = size }, 0, size);
+ } else if (flags & CEL_BUFFER_FLAG_GPU) {
+ TRACE("Uploading data to buffer using staging buffer");
+ // Create a staging buffer
+ buffer_handle staging = gpu_buffer_create(size, buf_type, CEL_BUFFER_FLAG_CPU, NULL);
+
+ // Copy data into it
+ buffer_upload_bytes(staging, (bytebuffer){ .buf = (u8*)data, .size = size }, 0, size);
+
+ // Enqueue a copy from the staging buffer into the DEVICE_LOCAL buffer
+ gpu_cmd_encoder temp_encoder = gpu_cmd_encoder_create();
+ gpu_cmd_encoder_begin(temp_encoder);
+ encode_buffer_copy(&temp_encoder, staging, 0, handle, 0, size);
+ gpu_cmd_buffer copy_cmd_buffer = gpu_cmd_encoder_finish(&temp_encoder);
+
+ VkSubmitInfo submit_info = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &temp_encoder.cmd_buffer;
+ vkQueueSubmit(context.device->graphics_queue, 1, &submit_info, VK_NULL_HANDLE);
+
+ // Cleanup
+ vkQueueWaitIdle(context.device->graphics_queue);
+ gpu_cmd_encoder_destroy(&temp_encoder);
+ gpu_buffer_destroy(staging);
+ }
+ }
+
+ return handle;
+}
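+
+/* Usage sketch (illustrative; `verts` is a hypothetical CPU-side vertex array):
+ *   buffer_handle vbo = gpu_buffer_create(sizeof(verts), CEL_BUFFER_VERTEX,
+ *                                         CEL_BUFFER_FLAG_GPU, verts);
+ * The GPU path above waits on vkQueueWaitIdle before returning, so `verts` can
+ * be freed as soon as the call completes.
+ */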
+
+void gpu_buffer_destroy(buffer_handle buffer) {
+ gpu_buffer* b = buffer_pool_get(&context.resource_pools->buffers, buffer);
+ vkDestroyBuffer(context.device->logical_device, b->handle, context.allocator);
+ vkFreeMemory(context.device->logical_device, b->memory, context.allocator);
+ buffer_pool_dealloc(&context.resource_pools->buffers, buffer);
+}
+
+// Upload data to a GPU-side buffer by mapping its memory and copying the bytes in
+void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size) {
+ gpu_buffer* buffer = buffer_pool_get(&context.resource_pools->buffers, gpu_buf);
+ void* data_ptr;
+  vkMapMemory(context.device->logical_device, buffer->memory, offset, size, 0, &data_ptr);
+  DEBUG("Uploading %llu bytes to buffer", (unsigned long long)size);
+ memcpy(data_ptr, cpu_buf.buf, size);
+ vkUnmapMemory(context.device->logical_device, buffer->memory);
+}
+
+void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
+ buffer_handle dst, u64 dst_offset, u64 copy_size) {
+ VkBufferCopy copy_region;
+ copy_region.srcOffset = src_offset;
+ copy_region.dstOffset = dst_offset;
+ copy_region.size = copy_size;
+
+ gpu_buffer* src_buf = buffer_pool_get(&context.resource_pools->buffers, src);
+ gpu_buffer* dst_buf = buffer_pool_get(&context.resource_pools->buffers, dst);
+ vkCmdCopyBuffer(encoder->cmd_buffer, src_buf->handle, dst_buf->handle, 1, &copy_region);
+}
+
+// one-shot command buffers
+VkCommandBuffer vulkan_command_buffer_create_oneshot() {
+ VkCommandBufferAllocateInfo alloc_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO };
+ alloc_info.commandPool = context.device->pool;
+ alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ alloc_info.commandBufferCount = 1;
+ alloc_info.pNext = 0;
+
+ VkCommandBuffer cmd_buffer;
+ vkAllocateCommandBuffers(context.device->logical_device, &alloc_info, &cmd_buffer);
+
+ VkCommandBufferBeginInfo begin_info = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+ begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+
+ vkBeginCommandBuffer(cmd_buffer, &begin_info);
+
+ return cmd_buffer;
+}
+
+void vulkan_command_buffer_finish_oneshot(VkCommandBuffer cmd_buffer) {
+ VK_CHECK(vkEndCommandBuffer(cmd_buffer));
+
+ // submit to queue
+ VkSubmitInfo submit_info = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+ submit_info.commandBufferCount = 1;
+ submit_info.pCommandBuffers = &cmd_buffer;
+ VK_CHECK(vkQueueSubmit(context.device->graphics_queue, 1, &submit_info, 0));
+ VK_CHECK(vkQueueWaitIdle(context.device->graphics_queue));
+
+ vkFreeCommandBuffers(context.device->logical_device, context.device->pool, 1, &cmd_buffer);
+}
+
+void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst,
+ u64 dst_offset, u64 copy_size) {
+ VkBufferCopy copy_region;
+ copy_region.srcOffset = src_offset;
+ copy_region.dstOffset = dst_offset;
+ copy_region.size = copy_size;
+
+ gpu_buffer* src_buf = buffer_pool_get(&context.resource_pools->buffers, src);
+ gpu_buffer* dst_buf = buffer_pool_get(&context.resource_pools->buffers, dst);
+ VkCommandBuffer temp_cmd_buffer = vulkan_command_buffer_create_oneshot();
+ vkCmdCopyBuffer(temp_cmd_buffer, src_buf->handle, dst_buf->handle, 1, &copy_region);
+ vulkan_command_buffer_finish_oneshot(temp_cmd_buffer);
+}
+
+void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst) {
+ gpu_buffer* src_buf = buffer_pool_get(&context.resource_pools->buffers, src);
+ gpu_texture* dst_tex = texture_pool_get(&context.resource_pools->textures, dst);
+
+ VkCommandBuffer temp_cmd_buffer = vulkan_command_buffer_create_oneshot();
+
+ VkBufferImageCopy region;
+ region.bufferOffset = 0;
+ region.bufferRowLength = 0;
+ region.bufferImageHeight = 0;
+ region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ region.imageSubresource.mipLevel = 0;
+ region.imageSubresource.baseArrayLayer = 0;
+ region.imageSubresource.layerCount = 1;
+ printf("Image details width: %d height %d\n", dst_tex->desc.extents.x, dst_tex->desc.extents.y);
+ region.imageOffset.x = 0;
+ region.imageOffset.y = 0;
+ region.imageOffset.z = 0;
+ region.imageExtent.width = dst_tex->desc.extents.x;
+ region.imageExtent.height = dst_tex->desc.extents.y;
+ region.imageExtent.depth = 1;
+
+ vkCmdCopyBufferToImage(temp_cmd_buffer, src_buf->handle, dst_tex->handle,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+
+ vulkan_command_buffer_finish_oneshot(temp_cmd_buffer);
+}
+
+VkImage vulkan_image_create(u32x2 dimensions, VkImageType image_type, VkFormat format,
+ VkImageUsageFlags usage) {
+ VkImage image;
+
+ VkImageCreateInfo image_create_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+ image_create_info.imageType = VK_IMAGE_TYPE_2D;
+ image_create_info.extent.width = dimensions.x;
+ image_create_info.extent.height = dimensions.y;
+ image_create_info.extent.depth = 1;
+ image_create_info.mipLevels = 1;
+ image_create_info.arrayLayers = 1;
+ image_create_info.format = format;
+ image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
+ image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ image_create_info.usage = usage; // VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
+
+ VK_CHECK(
+ vkCreateImage(context.device->logical_device, &image_create_info, context.allocator, &image));
+
+ return image;
+}
+
+texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {
+ VkDeviceSize image_size = desc.extents.x * desc.extents.y * 4;
+ // FIXME: handle this properly
+ VkFormat format = desc.format == CEL_TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM ? VK_FORMAT_R8G8B8A8_SRGB
+ : VK_FORMAT_D32_SFLOAT;
+
+ VkImage image; // vulkan_image_create(desc.extents, VK_IMAGE_TYPE_2D, format,
+ // VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT);
+ VkDeviceMemory image_memory;
+
+ VkImageCreateInfo image_create_info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+ image_create_info.imageType = VK_IMAGE_TYPE_2D;
+ image_create_info.extent.width = desc.extents.x;
+ image_create_info.extent.height = desc.extents.y;
+ image_create_info.extent.depth = 1;
+ image_create_info.mipLevels = 1;
+ image_create_info.arrayLayers = 1;
+ image_create_info.format = format;
+ image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
+ image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (format == VK_FORMAT_D32_SFLOAT) {
+ image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+ image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
+
+ VK_CHECK(
+ vkCreateImage(context.device->logical_device, &image_create_info, context.allocator, &image));
+
+ VkMemoryRequirements memory_reqs;
+ vkGetImageMemoryRequirements(context.device->logical_device, image, &memory_reqs);
+
+ VkMemoryAllocateInfo alloc_info = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ alloc_info.allocationSize = memory_reqs.size;
+ alloc_info.memoryTypeIndex =
+ find_memory_index(memory_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
+ vkAllocateMemory(context.device->logical_device, &alloc_info, context.allocator, &image_memory);
+
+ vkBindImageMemory(context.device->logical_device, image, image_memory, 0);
+
+ texture_handle handle;
+ gpu_texture* texture = texture_pool_alloc(&context.resource_pools->textures, &handle);
+ DEBUG("Allocated texture with handle %d", handle.raw);
+ texture->handle = image;
+ texture->debug_label = "Test Texture";
+ texture->desc = desc;
+ texture->memory = image_memory;
+ texture->size = image_size;
+
+ if (data) {
+ TRACE("Uploading pixel data to texture using staging buffer");
+ // Create a staging buffer
+ buffer_handle staging =
+ gpu_buffer_create(image_size, CEL_BUFFER_DEFAULT, CEL_BUFFER_FLAG_CPU, NULL);
+ // Copy data into it
+ vulkan_transition_image_layout(texture, format, VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ buffer_upload_bytes(staging, (bytebuffer){ .buf = (u8*)data, .size = image_size }, 0,
+ image_size);
+ copy_buffer_to_image_oneshot(staging, handle);
+ vulkan_transition_image_layout(texture, format, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+
+ gpu_buffer_destroy(staging);
+ }
+
+ // Texture View
+ if (create_view) {
+ VkImageViewCreateInfo view_create_info = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
+ view_create_info.image = image;
+ view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ view_create_info.format = format;
+ view_create_info.subresourceRange.aspectMask =
+ format == VK_FORMAT_D32_SFLOAT ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
+
+ view_create_info.subresourceRange.baseMipLevel = 0;
+ view_create_info.subresourceRange.levelCount = 1;
+ view_create_info.subresourceRange.baseArrayLayer = 0;
+ view_create_info.subresourceRange.layerCount = 1;
+
+ VK_CHECK(vkCreateImageView(context.device->logical_device, &view_create_info, context.allocator,
+ &texture->view));
+ }
+
+ // Sampler
+ VkSamplerCreateInfo sampler_info = { VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO };
+ sampler_info.magFilter = VK_FILTER_LINEAR;
+ sampler_info.minFilter = VK_FILTER_LINEAR;
+ sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ sampler_info.anisotropyEnable = VK_TRUE;
+ sampler_info.maxAnisotropy = 16;
+ sampler_info.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
+ sampler_info.unnormalizedCoordinates = VK_FALSE;
+ sampler_info.compareEnable = VK_FALSE;
+ sampler_info.compareOp = VK_COMPARE_OP_ALWAYS;
+ sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ sampler_info.mipLodBias = 0.0;
+ sampler_info.minLod = 0.0;
+ sampler_info.maxLod = 0.0;
+
+ VkResult res = vkCreateSampler(context.device->logical_device, &sampler_info, context.allocator,
+ &texture->sampler);
+ if (res != VK_SUCCESS) {
+ ERROR("Error creating texture sampler for image %s", texture->debug_label);
+ exit(1);
+ }
+
+ return handle;
+}
+
+void vulkan_transition_image_layout(gpu_texture* texture, VkFormat format, VkImageLayout old_layout,
+ VkImageLayout new_layout) {
+ VkCommandBuffer temp_cmd_buffer = vulkan_command_buffer_create_oneshot();
+
+ VkImageMemoryBarrier barrier = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
+ barrier.oldLayout = old_layout;
+ barrier.newLayout = new_layout;
+ barrier.srcQueueFamilyIndex = context.device->queue_family_indicies.graphics_family_index;
+ barrier.dstQueueFamilyIndex = context.device->queue_family_indicies.graphics_family_index;
+ barrier.image = texture->handle;
+ barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ barrier.subresourceRange.baseMipLevel = 0;
+ barrier.subresourceRange.levelCount = 1;
+ barrier.subresourceRange.baseArrayLayer = 0;
+ barrier.subresourceRange.layerCount = 1;
+ barrier.srcAccessMask = 0; // TODO
+ barrier.dstAccessMask = 0; // TODO
+
+ VkPipelineStageFlags source_stage;
+ VkPipelineStageFlags dest_stage;
+
+ if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
+ new_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
+ barrier.srcAccessMask = 0;
+ barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ source_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ dest_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ } else if (old_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
+ new_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
+ barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ source_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ dest_stage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ } else {
+ FATAL("Unsupported image layout transition");
+ return;
+ }
+
+ vkCmdPipelineBarrier(temp_cmd_buffer, source_stage, dest_stage, 0, 0, 0, 0, 0, 1, &barrier);
+
+ vulkan_command_buffer_finish_oneshot(temp_cmd_buffer);
+}
+
+/* TYPED_POOL(gpu_buffer, buffer); */
+/* TYPED_POOL(gpu_texture, texture); */
+
+/* void resource_pools_init(arena* a, struct resource_pools* res_pools) { */
+/* buffer_pool buf_pool = buffer_pool_create(a, MAX_BUFFERS, sizeof(gpu_buffer)); */
+/* res_pools->buffers = buf_pool; */
+/* texture_pool tex_pool = texture_pool_create(a, MAX_TEXTURES, sizeof(gpu_texture)); */
+/* res_pools->textures = tex_pool; */
+
+/* context.resource_pools = res_pools; */
+/* } */
+
+#endif
diff --git a/archive/src/render/archive/backends/vulkan/backend_vulkan.h b/archive/src/render/archive/backends/vulkan/backend_vulkan.h
new file mode 100644
index 0000000..6ca0bb5
--- /dev/null
+++ b/archive/src/render/archive/backends/vulkan/backend_vulkan.h
@@ -0,0 +1,118 @@
+#pragma once
+#include "defines.h"
+#if defined(CEL_REND_BACKEND_VULKAN)
+#include <vulkan/vk_platform.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+#include "mem.h"
+#include "ral.h"
+#include "ral_types.h"
+
+#define MAX_FRAMES_IN_FLIGHT 2
+#define GPU_SWAPCHAIN_IMG_COUNT 2
+
+/*
+Conventions:
+ - Place the 'handle' as the first field of a struct
+ - Vulkan specific data goes at the top, followed by our internal data
+*/
+
+typedef struct queue_family_indices {
+ u32 graphics_family_index;
+ u32 present_family_index;
+ u32 compute_family_index;
+ u32 transfer_family_index;
+ bool has_graphics;
+ bool has_present;
+ bool has_compute;
+ bool has_transfer;
+} queue_family_indices;
+
+// typedef struct vulkan_framebuffer {
+// } vulkan_framebuffer;
+
+typedef struct gpu_swapchain {
+ VkSwapchainKHR handle;
+ arena swapchain_arena;
+ VkExtent2D extent;
+ u32x2 dimensions;
+ VkSurfaceFormatKHR image_format;
+ VkPresentModeKHR present_mode;
+ u32 image_count;
+ VkImage* images;
+ VkImageView* image_views;
+} gpu_swapchain;
+
+typedef struct gpu_device {
+ // In Vulkan we store both physical and logical device here
+ VkPhysicalDevice physical_device;
+ VkDevice logical_device;
+ VkPhysicalDeviceProperties properties;
+ VkPhysicalDeviceFeatures features;
+ VkPhysicalDeviceMemoryProperties memory;
+ queue_family_indices queue_family_indicies;
+ VkQueue graphics_queue;
+ VkQueue present_queue;
+ VkQueue compute_queue;
+ VkQueue transfer_queue;
+ VkCommandPool pool;
+} gpu_device;
+
+typedef struct gpu_pipeline_layout {
+ VkPipelineLayout handle;
+} gpu_pipeline_layout;
+
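+// One backing buffer per frame-in-flight so the CPU can write the next frame's
+// uniforms while the GPU is still reading the previous frame's buffer.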
+typedef struct desc_set_uniform_buffer {
+ VkBuffer buffers[MAX_FRAMES_IN_FLIGHT];
+ VkDeviceMemory uniform_buf_memorys[MAX_FRAMES_IN_FLIGHT];
+ void* uniform_buf_mem_mappings[MAX_FRAMES_IN_FLIGHT];
+ size_t size;
+} desc_set_uniform_buffer;
+
+typedef struct gpu_pipeline {
+ VkPipeline handle;
+ VkPipelineLayout layout_handle;
+
+ // Descriptor gubbins
+ shader_data data_layouts[MAX_SHADER_DATA_LAYOUTS];
+ u32 data_layouts_count;
+
+ VkDescriptorSetLayout* desc_set_layouts;
+ // Based on group, we know which data to load
+ desc_set_uniform_buffer* uniform_pointers;
+ u32 desc_set_layouts_count;
+
+} gpu_pipeline;
+
+typedef struct gpu_renderpass {
+ VkRenderPass handle;
+ // TODO: Where to store framebuffers? VkFramebuffer framebuffers[GPU_SWAPCHAIN_IMG_COUNT];
+} gpu_renderpass;
+
+typedef struct gpu_cmd_encoder {
+ VkCommandBuffer cmd_buffer;
+ VkDescriptorPool descriptor_pool;
+ gpu_pipeline* pipeline;
+} gpu_cmd_encoder;
+
+typedef struct gpu_cmd_buffer {
+ VkCommandBuffer cmd_buffer;
+} gpu_cmd_buffer;
+
+typedef struct gpu_buffer {
+ VkBuffer handle;
+ VkDeviceMemory memory;
+ u64 size;
+} gpu_buffer;
+
+typedef struct gpu_texture {
+ VkImage handle;
+ VkDeviceMemory memory;
+ u64 size;
+ texture_desc desc;
+ VkImageView view;
+ VkSampler sampler;
+ char* debug_label;
+} gpu_texture;
+#endif \ No newline at end of file
diff --git a/archive/src/render/immdraw.c b/archive/src/render/immdraw.c
new file mode 100644
index 0000000..8a10c65
--- /dev/null
+++ b/archive/src/render/immdraw.c
@@ -0,0 +1,176 @@
+#include "immdraw.h"
+#include "core.h"
+#include "file.h"
+#include "log.h"
+#include "maths.h"
+#include "maths_types.h"
+#include "primitives.h"
+#include "ral_common.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+#include "render.h"
+#include "render_types.h"
+#include "shader_layouts.h"
+
+void Immdraw_Init(Immdraw_Storage* storage) {
+ INFO("Immediate drawing initialisation");
+
+ // Meshes
+ Geometry sphere_geo = Geo_CreateUVsphere(1.0, 16, 16);
+ storage->sphere = Mesh_Create(&sphere_geo, true);
+
+ Geometry cube_geo = Geo_CreateCuboid(f32x3(1.0, 1.0, 1.0));
+ storage->cube = Mesh_Create(&cube_geo, true);
+
+ Geometry plane_geo = Geo_CreatePlane(f32x2(1.0, 1.0), 1, 1);
+ storage->plane = Mesh_Create(&plane_geo, true);
+
+ Geometry cone_geo = Geo_CreateCone(1.0, 1.0, 8);
+ storage->cone = Mesh_Create(&cone_geo, true);
+
+ Geometry cyl_geo = Geo_CreateCylinder(1.0, 2.0, 8);
+ storage->cylinder = Mesh_Create(&cyl_geo, true);
+
+ storage->bbox = GenBboxMesh();
+
+ // Pipeline / material
+ VertexDescription vertex_desc = {
+ .debug_label = "Immdraw Vertex",
+ .use_full_vertex_size = true,
+ };
+ VertexDesc_AddAttr(&vertex_desc, "position", ATTR_F32x3);
+ VertexDesc_AddAttr(&vertex_desc, "normal", ATTR_F32x3);
+
+ const char* vert_path = "assets/shaders/immdraw.vert";
+ const char* frag_path = "assets/shaders/immdraw.frag";
+ const char* vert_shader = string_from_file(vert_path);
+ const char* frag_shader = string_from_file(frag_path);
+
+ ShaderDataLayout camera_data = Binding_Camera_GetLayout(NULL);
+ ShaderDataLayout imm_uniform_data = ImmediateUniforms_GetLayout(NULL);
+
+ GraphicsPipelineDesc pipeline_desc = {
+ .debug_name = "Immediate Draw Pipeline",
+ .vertex_desc = static_3d_vertex_description(),
+ .data_layouts = { camera_data, imm_uniform_data },
+ .data_layouts_count = 2,
+ .vs = { .debug_name = "Immdraw Vertex Shader", .filepath = vert_path, .code = vert_shader },
+ .fs = { .debug_name = "Immdraw Fragment Shader", .filepath = frag_path, .code = frag_shader },
+ .depth_test = true,
+ .wireframe = true,
+ };
+ GPU_Renderpass* rpass =
+ GPU_Renderpass_Create((GPU_RenderpassDesc){ .default_framebuffer = true });
+ storage->colour_pipeline = GPU_GraphicsPipeline_Create(pipeline_desc, rpass);
+}
+
+void Immdraw_Shutdown(Immdraw_Storage* storage) {
+ GraphicsPipeline_Destroy(storage->colour_pipeline);
+}
+
+void Immdraw_Sphere(Transform tf, Vec4 colour, bool wireframe) {
+ TRACE("Draw sphere");
+ Immdraw_Storage* imm = Render_GetImmdrawStorage();
+ Immdraw_Primitive(tf, CEL_TRI, 1.0, colour, wireframe, imm->sphere);
+}
+void Immdraw_Cuboid(Transform tf, Vec4 colour, bool wireframe) {
+ TRACE("Draw cube");
+ Immdraw_Storage* imm = Render_GetImmdrawStorage();
+ Immdraw_Primitive(tf, CEL_TRI, 1.0, colour, wireframe, imm->cube);
+}
+void Immdraw_Plane(Transform tf, Vec4 colour, bool wireframe) {
+ TRACE("Draw plane");
+ Immdraw_Storage* imm = Render_GetImmdrawStorage();
+ Immdraw_Primitive(tf, CEL_TRI, 1.0, colour, wireframe, imm->plane);
+}
+
+void Immdraw_Bbox(Transform tf, Vec4 colour, bool wireframe) {
+ TRACE("Draw bbox");
+ Immdraw_Storage* imm = Render_GetImmdrawStorage();
+ Immdraw_Primitive(tf, CEL_LINE, 1.0, colour, wireframe, imm->bbox);
+}
+
+void Immdraw_Cylinder(Transform tf, Vec4 colour, bool wireframe) {
+ TRACE("Draw cylinder");
+ Immdraw_Storage* imm = Render_GetImmdrawStorage();
+ Immdraw_Primitive(tf, CEL_TRI, 1.0, colour, wireframe, imm->cylinder);
+}
+
+void Immdraw_Cone(Transform tf, Vec4 colour, bool wireframe) {
+ TRACE("Draw cone");
+ Immdraw_Storage* imm = Render_GetImmdrawStorage();
+ Immdraw_Primitive(tf, CEL_TRI, 1.0, colour, wireframe, imm->cone);
+}
+
+void Immdraw_Primitive(Transform tf, PrimitiveTopology topology, f32 size, Vec4 colour,
+ bool wireframe, Mesh mesh) {
+ Immdraw_Storage* imm = Render_GetImmdrawStorage();
+ GPU_CmdEncoder* enc = GPU_GetDefaultEncoder();
+
+ // begin renderpass
+ GPU_CmdEncoder_BeginRender(enc, imm->colour_pipeline->renderpass);
+ // bind pipeline
+ GPU_EncodeBindPipeline(enc, imm->colour_pipeline);
+
+ // TODO: implement wireframe in other apis
+#if defined(CEL_REND_BACKEND_OPENGL)
+ if (wireframe) {
+ glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
+ } else {
+ glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
+ }
+#endif
+
+ // update uniforms
+ ImmediateUniforms uniforms = {
+ .model = transform_to_mat(&tf),
+ .colour = colour,
+ };
+ Mat4 view, proj;
+ u32x2 dimensions = GPU_Swapchain_GetDimensions();
+ RenderScene* scene = Render_GetScene();
+ Camera_ViewProj(&scene->camera, (f32)dimensions.x, (f32)dimensions.y, &view, &proj);
+ Binding_Camera camera_data = { .view = view,
+ .projection = proj,
+ .viewPos = vec4(scene->camera.position.x, scene->camera.position.y,
+ scene->camera.position.z, 1.0) };
+ GPU_EncodeBindShaderData(enc, 0, Binding_Camera_GetLayout(&camera_data));
+ GPU_EncodeBindShaderData(enc, 1, ImmediateUniforms_GetLayout(&uniforms));
+
+ // draw call
+ GPU_EncodeSetVertexBuffer(enc, mesh.vertex_buffer);
+ GPU_EncodeSetIndexBuffer(enc, mesh.index_buffer);
+ GPU_EncodeDrawIndexed(enc, topology, mesh.geometry.index_count);
+
+ // end renderpass
+ GPU_CmdEncoder_EndRender(enc);
+}
+
+Mesh GenBboxMesh() {
+ Vertex_darray* vertices = Vertex_darray_new(8);
+ u32_darray* indices = u32_darray_new(24);
+
+  // normals & UVs don't matter for a line-list bounding box
+ VERT_3D(vertices, FRONT_BOT_LEFT, VEC3_NEG_Z, vec2(0, 0));
+ VERT_3D(vertices, FRONT_BOT_RIGHT, VEC3_NEG_Z, vec2(0, 0));
+ VERT_3D(vertices, BACK_BOT_LEFT, VEC3_NEG_Z, vec2(0, 0));
+ VERT_3D(vertices, BACK_BOT_RIGHT, VEC3_NEG_Z, vec2(0, 0));
+ VERT_3D(vertices, FRONT_TOP_LEFT, VEC3_NEG_Z, vec2(0, 0));
+ VERT_3D(vertices, FRONT_TOP_RIGHT, VEC3_NEG_Z, vec2(0, 0));
+ VERT_3D(vertices, BACK_TOP_LEFT, VEC3_NEG_Z, vec2(0, 0));
+ VERT_3D(vertices, BACK_TOP_RIGHT, VEC3_NEG_Z, vec2(0, 0));
+
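+  // 24 indices = the 12 edges of the box: four bottom-face edges, four
+  // top-face edges, then the four vertical edges connecting them.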
+ u32 line_indices[24] = { 0, 1, 2, 3, 0, 2, 1, 3, 4, 5, 6, 7, 4, 6, 5, 7, 0, 4, 1, 5, 2, 6, 3, 7 };
+ for (u32 i = 0; i < 24; i++) {
+ u32_darray_push(indices, line_indices[i]);
+ }
+
+ Geometry geo = { .format = VERTEX_STATIC_3D,
+ .has_indices = true,
+ .index_count = indices->len,
+ .vertices = vertices,
+ .indices = indices };
+
+ return Mesh_Create(&geo, true);
+}
diff --git a/archive/src/render/immdraw.h b/archive/src/render/immdraw.h
new file mode 100644
index 0000000..2911350
--- /dev/null
+++ b/archive/src/render/immdraw.h
@@ -0,0 +1,63 @@
+/**
+ * @brief Immediate-mode drawing APIs
+ */
+
+#pragma once
+#include "defines.h"
+#include "maths_types.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+#include "render_types.h"
+
+typedef struct Immdraw_Storage {
+ Mesh plane;
+ Mesh cube;
+ Mesh sphere;
+ Mesh cylinder;
+ Mesh cone;
+ Mesh bbox;
+  GPU_Pipeline* colour_pipeline; /** @brief Pipeline for drawing primitives with a flat uniform colour */
+} Immdraw_Storage;
+
+typedef struct ImmediateUniforms {
+ Mat4 model;
+ Vec4 colour;
+} ImmediateUniforms;
+
+// --- Public API
+
+PUB void Immdraw_Init(Immdraw_Storage* storage);
+PUB void Immdraw_Shutdown(Immdraw_Storage* storage);
+
+// These functions cause a pipeline switch and so aren't optimised for performance
+PUB void Immdraw_Plane(Transform tf, Vec4 colour, bool wireframe);
+PUB void Immdraw_Cuboid(Transform tf, Vec4 colour, bool wireframe);
+PUB void Immdraw_Cylinder(Transform tf, Vec4 colour, bool wireframe);
+PUB void Immdraw_Cone(Transform tf, Vec4 colour, bool wireframe);
+PUB void Immdraw_Sphere(Transform tf, Vec4 colour, bool wireframe);
+PUB void Immdraw_Bbox(Transform tf, Vec4 colour, bool wireframe);
+
+PUB void Immdraw_TransformGizmo(Transform tf, f32 size);
+
+// --- Internal
+
+void Immdraw_Primitive(Transform tf, PrimitiveTopology topology, f32 size, Vec4 colour,
+ bool wireframe, Mesh mesh);
+
+Mesh GenBboxMesh();
+
+static ShaderDataLayout ImmediateUniforms_GetLayout(void* data) {
+ ImmediateUniforms* d = (ImmediateUniforms*)data;
+ bool has_data = data != NULL;
+
+ ShaderBinding b1 = { .label = "ImmUniforms",
+ .kind = BINDING_BYTES,
+ // .vis = VISIBILITY_VERTEX,
+ .data.bytes.size = sizeof(ImmediateUniforms) };
+
+ if (has_data) {
+ b1.data.bytes.data = d;
+ }
+
+ return (ShaderDataLayout){ .bindings = { b1 }, .binding_count = 1 };
+}
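+
+/* Example usage (editor's sketch): immediate-mode draws are issued each frame
+ * between Render_FrameBegin and Render_FrameEnd. How the Transform is built is
+ * up to the maths layer; the calls themselves look like:
+ *
+ *   Immdraw_Sphere(tf, vec4(1, 0, 0, 1), true);  // red wireframe unit sphere
+ *   Immdraw_Bbox(tf, vec4(0, 1, 0, 1), true);    // green wireframe box
+ */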
diff --git a/archive/src/render/pbr.c b/archive/src/render/pbr.c
new file mode 100644
index 0000000..4bad528
--- /dev/null
+++ b/archive/src/render/pbr.c
@@ -0,0 +1,266 @@
+#include "pbr.h"
+#include "animation.h"
+#include "camera.h"
+#include "core.h"
+#include "file.h"
+#include "log.h"
+#include "maths.h"
+#include "mem.h"
+#include "ral_common.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+#include "render.h"
+#include "render_types.h"
+#include "shader_layouts.h"
+
+void PBR_Init(PBR_Storage* storage) {
+ INFO("PBR shaders init");
+ storage->pbr_pass = PBR_RPassCreate();
+ PBR_PipelinesCreate(storage, storage->pbr_pass);
+}
+
+GPU_Renderpass* PBR_RPassCreate() {
+ GPU_RenderpassDesc desc = { .default_framebuffer = true };
+ return GPU_Renderpass_Create(desc);
+}
+
+void PBR_PipelinesCreate(PBR_Storage* storage, GPU_Renderpass* rpass) {
+ // Common shader bindings
+ ShaderDataLayout camera_data = Binding_Camera_GetLayout(NULL);
+ ShaderDataLayout model_data = Binding_Model_GetLayout(NULL);
+ ShaderDataLayout material_data = PBRMaterial_GetLayout(NULL);
+ ShaderDataLayout lights_data = Binding_Lights_GetLayout(NULL);
+
+ // Static
+ {
+ const char* vert_path = "assets/shaders/static_geometry.vert";
+ const char* frag_path = "assets/shaders/pbr_textured.frag";
+ char* vert_shader = string_from_file(vert_path);
+ char* frag_shader = string_from_file(frag_path);
+
+ GraphicsPipelineDesc desc = {
+ .debug_name = "PBR (Static) Pipeline",
+ .vertex_desc = static_3d_vertex_description(),
+ .data_layouts = { camera_data, model_data, material_data, lights_data },
+ .data_layouts_count = 4,
+ .vs = { .debug_name = "PBR (textured) Vertex Shader",
+ .filepath = str8(vert_path),
+ .code = vert_shader },
+ .fs = { .debug_name = "PBR (textured) Fragment Shader",
+ .filepath = str8(frag_path),
+ .code = frag_shader },
+ .depth_test = true,
+ .wireframe = true,
+ };
+ storage->pbr_static_pipeline = GPU_GraphicsPipeline_Create(desc, rpass);
+ }
+
+ // Skinned
+ {
+ const char* vert_path = "assets/shaders/skinned_geometry.vert";
+ const char* frag_path = "assets/shaders/pbr_textured.frag";
+ char* vert_shader = string_from_file(vert_path);
+ char* frag_shader = string_from_file(frag_path);
+
+ ShaderDataLayout anim_uniform = AnimData_GetLayout(NULL);
+
+ VertexDescription vertex_desc = { .debug_label = "Skinned vertices",
+ .use_full_vertex_size = true };
+ VertexDesc_AddAttr(&vertex_desc, "inPosition", ATTR_F32x3);
+ VertexDesc_AddAttr(&vertex_desc, "inNormal", ATTR_F32x3);
+ VertexDesc_AddAttr(&vertex_desc, "inTexCoords", ATTR_F32x2);
+ VertexDesc_AddAttr(&vertex_desc, "inBoneIndices", ATTR_I32x4);
+ VertexDesc_AddAttr(&vertex_desc, "inWeights", ATTR_F32x4);
+
+ GraphicsPipelineDesc desc = {
+ .debug_name = "PBR (Skinned) Pipeline",
+ .vertex_desc = vertex_desc,
+ .data_layouts = { camera_data, model_data, material_data, lights_data, anim_uniform },
+ .data_layouts_count = 5,
+ .vs = { .debug_name = "PBR (textured) Vertex Shader",
+ .filepath = str8(vert_path),
+ .code = vert_shader },
+ .fs = { .debug_name = "PBR (textured) Fragment Shader",
+ .filepath = str8(frag_path),
+ .code = frag_shader },
+ .depth_test = true,
+ .wireframe = true,
+ };
+ storage->pbr_skinned_pipeline = GPU_GraphicsPipeline_Create(desc, rpass);
+ }
+}
+
+void PBR_Execute(PBR_Storage* storage, Camera camera, TextureHandle shadowmap_tex,
+ RenderEnt* entities, size_t entity_count) {
+ // 1. set up our pipeline
+ // 2. upload constant data (camera, lights)
+ // 3. draw each entity
+ // - upload material data -> in the future we will sort & batch by material
+ // - upload model transform
+ // - emit draw call
+
+ GPU_CmdEncoder* enc = GPU_GetDefaultEncoder();
+ GPU_CmdEncoder_BeginRender(enc, storage->pbr_pass);
+
+ // TEMP: only do skinned
+ GPU_EncodeBindPipeline(enc, storage->pbr_skinned_pipeline);
+
+ // Feed shader data
+ Mat4 view, proj;
+ u32x2 dimensions = GPU_Swapchain_GetDimensions();
+ Camera_ViewProj(&camera, (f32)dimensions.x, (f32)dimensions.y, &view, &proj);
+ Binding_Camera camera_data = { .view = view,
+ .projection = proj,
+ .viewPos = vec4(camera.position.x, camera.position.y,
+ camera.position.z, 1.0) };
+ GPU_EncodeBindShaderData(enc, 0, Binding_Camera_GetLayout(&camera_data));
+
+ Vec3 light_color = vec3(300.0, 300.0, 300.0);
+ Binding_Lights
+ lights_data = { .pointLights = {
+ // FIXME: add lights to our RenderScene structure. for now these are
+ // hardcoded
+ (pbr_point_light){ .pos = vec3(0.0, 6.0, 6.0), .color = light_color },
+ (pbr_point_light){ .pos = vec3(-10, 10, 10), .color = light_color },
+ (pbr_point_light){ .pos = vec3(10, -10, 10), .color = light_color },
+ (pbr_point_light){ .pos = vec3(-10, -10, 10), .color = light_color },
+ } };
+ GPU_EncodeBindShaderData(enc, 3, Binding_Lights_GetLayout(&lights_data));
+
+ // TODO: Add shadowmap texture to uniforms
+ Mesh_pool* mesh_pool = Render_GetMeshPool();
+ Material_pool* material_pool = Render_GetMaterialPool();
+
+ for (size_t ent_i = 0; ent_i < entity_count; ent_i++) {
+ RenderEnt renderable = entities[ent_i];
+ Mesh* mesh = Mesh_pool_get(mesh_pool, renderable.mesh);
+ Material* mat = Material_pool_get(material_pool, renderable.material);
+
+ // upload material data
+ PBRMaterialUniforms material_data = { .mat = *mat };
+ GPU_EncodeBindShaderData(enc, 2, PBRMaterial_GetLayout(&material_data));
+
+ // upload model transform
+ Binding_Model model_data = { .model = renderable.affine };
+ GPU_EncodeBindShaderData(enc, 1, Binding_Model_GetLayout(&model_data));
+
+ // Skinning matrices
+
+ // 1. calculate matrices
+    AnimDataUniform anim_data = { 0 };
+    CASSERT(renderable.armature);
+    Armature* skeleton = renderable.armature;
+    // Skip joint 0: we assume it's the root for this test
+    for (int j_i = 1; j_i < skeleton->joints->len; j_i++) {
+      Joint* j = &skeleton->joints->data[j_i];
+      j->local_transform = transform_to_mat(&j->transform_components);
+      // accumulate the parent chain (parents precede children in the array)
+      Joint* p = &skeleton->joints->data[j->parent];
+      j->local_transform = mat4_mult(j->local_transform, p->local_transform);
+      // TODO: the final bone matrix should also fold in j->inverse_bind_matrix
+    }
+
+ // 2. bind and upload
+ for (int j_i = 1; j_i < skeleton->joints->len; j_i++) {
+ anim_data.bone_matrices[j_i] = skeleton->joints->data[j_i].local_transform;
+ }
+    // slot 4 matches the pipeline's data_layouts order (anim_uniform);
+    // slot 3 already holds the lights bound above
+    GPU_EncodeBindShaderData(enc, 4, AnimData_GetLayout(&anim_data));
+
+ // set buffers
+ GPU_EncodeSetVertexBuffer(enc, mesh->vertex_buffer);
+ GPU_EncodeSetIndexBuffer(enc, mesh->index_buffer);
+ // draw
+ GPU_EncodeDrawIndexedTris(enc, mesh->geometry.index_count);
+ }
+
+ GPU_CmdEncoder_EndRender(enc);
+}
+
+void PBRMaterial_BindData(ShaderDataLayout* layout, const void* data) {
+ PBRMaterialUniforms* d = (PBRMaterialUniforms*)data;
+ CASSERT(data);
+ CASSERT(layout->binding_count == 5);
+
+ TextureHandle white1x1 = Render_GetWhiteTexture();
+ if (d->mat.albedo_map.raw != INVALID_TEX_HANDLE.raw) {
+ layout->bindings[0].data.texture.handle = d->mat.albedo_map;
+ } else {
+ layout->bindings[0].data.texture.handle = white1x1;
+ }
+ // TODO .. the rest
+}
+
+ShaderDataLayout PBRMaterial_GetLayout(void* data) {
+ PBRMaterialUniforms* d = (PBRMaterialUniforms*)data;
+ bool has_data = data != NULL;
+
+ ShaderBinding b1 = {
+ .label = "albedoMap",
+ .kind = BINDING_TEXTURE,
+ };
+ ShaderBinding b2 = {
+ .label = "metallicRoughnessMap",
+ .kind = BINDING_TEXTURE,
+ };
+ ShaderBinding b3 = {
+ .label = "aoMap",
+ .kind = BINDING_TEXTURE,
+ };
+ ShaderBinding b4 = {
+ .label = "normalMap",
+ .kind = BINDING_TEXTURE,
+ };
+ ShaderBinding b5 = { .label = "PBR_Params",
+ .kind = BINDING_BYTES,
+ .data.bytes.size = sizeof(PBR_Params) };
+
+ if (has_data) {
+ TextureHandle white1x1 = Render_GetWhiteTexture();
+ if (d->mat.albedo_map.raw != INVALID_TEX_HANDLE.raw) {
+ b1.data.texture.handle = d->mat.albedo_map;
+ } else {
+ b1.data.texture.handle = white1x1;
+ }
+
+ if (d->mat.metallic_roughness_map.raw != INVALID_TEX_HANDLE.raw) {
+ b2.data.texture.handle = d->mat.metallic_roughness_map;
+ } else {
+ b2.data.texture.handle = white1x1;
+ }
+
+ if (d->mat.ambient_occlusion_map.raw != INVALID_TEX_HANDLE.raw) {
+ b3.data.texture.handle = d->mat.ambient_occlusion_map;
+ } else {
+ b3.data.texture.handle = white1x1;
+ }
+
+ if (d->mat.normal_map.raw != INVALID_TEX_HANDLE.raw) {
+ b4.data.texture.handle = d->mat.normal_map;
+ } else {
+ b4.data.texture.handle = white1x1;
+ }
+
+ arena* frame = Render_GetFrameArena();
+ PBR_Params* params = arena_alloc(frame, sizeof(PBR_Params));
+ params->albedo = d->mat.base_colour;
+ params->metallic = d->mat.metallic;
+ params->roughness = d->mat.roughness;
+ params->ambient_occlusion = d->mat.ambient_occlusion;
+ b5.data.bytes.data = params;
+ }
+
+ return (ShaderDataLayout){ .bindings = { b1, b2, b3, b4, b5 }, .binding_count = 5 };
+}
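+
+// NOTE (editor): the GetLayout(NULL) / GetLayout(&data) convention above is used
+// throughout the codebase. Pipelines call it with NULL just to learn the binding
+// shape, and draw code calls it with live data to upload, e.g.:
+//   ShaderDataLayout material_data = PBRMaterial_GetLayout(NULL);   // pipeline creation
+//   GPU_EncodeBindShaderData(enc, 2, PBRMaterial_GetLayout(&u));    // per-draw upload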
+
+Material PBRMaterialDefault() {
+ return (Material){ .name = "Standard Material",
+ .kind = MAT_PBR,
+ .base_colour = vec3(1.0, 1.0, 1.0),
+ .metallic = 0.0,
+ .roughness = 0.5,
+ .ambient_occlusion = 0.0,
+ .albedo_map = INVALID_TEX_HANDLE,
+ .metallic_roughness_map = INVALID_TEX_HANDLE,
+ .normal_map = INVALID_TEX_HANDLE,
+ .ambient_occlusion_map = INVALID_TEX_HANDLE };
+}
diff --git a/archive/src/render/pbr.h b/archive/src/render/pbr.h
new file mode 100644
index 0000000..5a21533
--- /dev/null
+++ b/archive/src/render/pbr.h
@@ -0,0 +1,70 @@
+/**
+ * @file pbr.h
+ * @brief PBR render pass and uniforms
+ */
+
+#pragma once
+#include "backend_opengl.h"
+#include "camera.h"
+#include "defines.h"
+#include "maths_types.h"
+#include "ral_types.h"
+#include "render_types.h"
+
+// --- Public API
+
+/** @brief Holds data for the PBR pipeline */
+typedef struct PBR_Storage {
+ GPU_Renderpass* pbr_pass;
+ GPU_Pipeline* pbr_static_pipeline;
+ GPU_Pipeline* pbr_skinned_pipeline;
+} PBR_Storage;
+
+typedef struct PBRMaterialUniforms {
+ Material mat;
+} PBRMaterialUniforms;
+
+/** @brief Initialise PBR storage: creates the renderpass and both pipelines */
+PUB void PBR_Init(PBR_Storage* storage);
+
+// NOTE: For simplicity's sake we will render this pass directly to the default framebuffer
+// internally this defers to `PBR_Execute()`
+PUB void PBR_Run(PBR_Storage* storage
+ // light data
+ // camera
+ // geometry
+ // materials
+);
+
+/** @brief Parameters that get passed as a uniform block to the PBR shader */
+typedef struct PBR_Params {
+ Vec3 albedo;
+ f32 metallic;
+ f32 roughness;
+ f32 ambient_occlusion;
+} PBR_Params;
+
+/** @brief Textures that will get passed into the PBR shader if they're not `INVALID_TEX_HANDLE` */
+typedef struct PBR_Textures {
+ TextureHandle albedo_map;
+ TextureHandle normal_map;
+ bool metal_roughness_combined;
+ TextureHandle metallic_map;
+ TextureHandle roughness_map;
+ TextureHandle ao_map;
+} PBR_Textures;
+
+/** @brief Returns a default white matte material */
+PUB Material PBRMaterialDefault();
+
+PUB ShaderDataLayout PBRMaterial_GetLayout(void* data);
+
+// --- Internal
+
+GPU_Renderpass* PBR_RPassCreate(); /** @brief Create the PBR Renderpass */
+
+void PBR_PipelinesCreate(PBR_Storage* storage,
+ GPU_Renderpass* rpass); /** @brief Create PBR Pipelines */
+
+void PBR_Execute(PBR_Storage* storage, Camera camera, TextureHandle shadowmap_tex,
+ RenderEnt* entities, size_t entity_count);
diff --git a/archive/src/render/render.c b/archive/src/render/render.c
new file mode 100644
index 0000000..af636a8
--- /dev/null
+++ b/archive/src/render/render.c
@@ -0,0 +1,359 @@
+/**
+ * @brief Core renderer: lifecycle, frame begin/end, and resource helpers.
+ */
+
+#include "render.h"
+#include <assert.h>
+#include <glfw3.h>
+#include <stdio.h>
+#include "camera.h"
+#include "core.h"
+#include "grid.h"
+#include "immdraw.h"
+#include "log.h"
+#include "maths.h"
+#include "maths_types.h"
+#include "mem.h"
+#include "pbr.h"
+#include "ral_common.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+#include "render_types.h"
+#include "shadows.h"
+#include "terrain.h"
+
+#define STB_IMAGE_IMPLEMENTATION
+#include <stb_image.h>
+
+#define FRAME_ARENA_SIZE MB(1)
+#define POOL_SIZE_BYTES \
+ MB(10) // we will reserve 10 megabytes up front to store resource, mesh, and material pools
+#define MAX_MESHES 1024
+#define MAX_MATERIALS 256
+
+extern Core g_core;
+
+struct Renderer {
+ struct GLFWwindow* window;
+ RendererConfig config;
+ GPU_Device device;
+ GPU_Swapchain swapchain;
+ GPU_Renderpass* default_renderpass;
+ bool frame_aborted;
+ RenderMode render_mode;
+ RenderScene scene;
+ PBR_Storage* pbr;
+ Shadow_Storage* shadows;
+ Terrain_Storage* terrain;
+ Grid_Storage* grid;
+ Immdraw_Storage* immediate;
+ // Text_Storage* text;
+ ResourcePools* resource_pools;
+ Mesh_pool mesh_pool;
+ Material_pool material_pool;
+ arena frame_arena;
+ TextureHandle white_1x1;
+ TextureHandle black_1x1;
+};
+
+Renderer* get_renderer() { return g_core.renderer; }
+
+bool Renderer_Init(RendererConfig config, Renderer* ren, GLFWwindow** out_window,
+ GLFWwindow* optional_window) {
+ INFO("Renderer init");
+ ren->render_mode = RENDER_MODE_DEFAULT;
+
+ ren->frame_arena = arena_create(malloc(FRAME_ARENA_SIZE), FRAME_ARENA_SIZE);
+
+ // init resource pools
+ DEBUG("Initialise GPU resource pools");
+ arena pool_arena = arena_create(malloc(POOL_SIZE_BYTES), POOL_SIZE_BYTES);
+ ren->resource_pools = arena_alloc(&pool_arena, sizeof(struct ResourcePools));
+ ResourcePools_Init(&pool_arena, ren->resource_pools);
+ ren->mesh_pool = Mesh_pool_create(&pool_arena, MAX_MESHES, sizeof(Mesh));
+ ren->material_pool = Material_pool_create(&pool_arena, MAX_MATERIALS, sizeof(Material));
+
+ // GLFW window creation
+ GLFWwindow* window;
+ if (optional_window != NULL) {
+ INFO("GLFWwindow pointer was provided!!!! Skipping generic glfw init..");
+ window = optional_window;
+ } else {
+ INFO("No GLFWwindow provided - creating one");
+    // NOTE: all platforms use GLFW at the moment but that's subject to change
+ glfwInit();
+
+#if defined(CEL_REND_BACKEND_OPENGL)
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
+#elif defined(CEL_REND_BACKEND_VULKAN)
+ glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
+#endif
+
+    window = glfwCreateWindow(config.scr_width, config.scr_height, config.window_name, NULL, NULL);
+    if (window == NULL) {
+      ERROR("Failed to create GLFW window");
+      glfwTerminate();
+      return false;
+    }
+    INFO("Window created");
+ }
+
+ ren->window = window;
+ *out_window = window;
+
+ glfwMakeContextCurrent(ren->window);
+
+ DEBUG("Set up GLFW window callbacks");
+ glfwSetWindowSizeCallback(window, Render_WindowSizeChanged);
+
+ // set the RAL backend up
+ if (!GPU_Backend_Init(config.window_name, window, ren->resource_pools)) {
+ return false;
+ }
+
+ GPU_Device_Create(&ren->device);
+ GPU_Swapchain_Create(&ren->swapchain);
+
+ // set up default scene
+ Camera default_cam =
+ Camera_Create(vec3(0.0, 2.0, 4.0), vec3_normalise(vec3(0.0, -2.0, -4.0)), VEC3_Y, 45.0);
+ SetCamera(default_cam);
+  DirectionalLight default_light = { 0 };  // TODO: sensible defaults
+ SetMainLight(default_light);
+
+ // create our renderpasses
+ ren->shadows = malloc(sizeof(Shadow_Storage));
+ Shadow_Init(ren->shadows, 1024, 1024);
+
+ ren->pbr = calloc(1, sizeof(PBR_Storage));
+ PBR_Init(ren->pbr);
+
+ ren->terrain = calloc(1, sizeof(Terrain_Storage));
+ Terrain_Init(ren->terrain);
+
+ // FIXME
+ // ren->grid = calloc(1, sizeof(Grid_Storage));
+ // Grid_Init(ren->grid);
+
+ ren->immediate = calloc(1, sizeof(Immdraw_Storage));
+ Immdraw_Init(ren->immediate);
+
+ // load default textures
+ ren->white_1x1 = TextureLoadFromFile("assets/textures/white1x1.png");
+ ren->black_1x1 = TextureLoadFromFile("assets/textures/black1x1.png");
+
+ return true;
+}
+
+void Renderer_Shutdown(Renderer* ren) {
+ free(ren->shadows);
+ DEBUG("Freed Shadows storage");
+ free(ren->pbr);
+ DEBUG("Freed PBR storage");
+ free(ren->terrain);
+ DEBUG("Freed Terrain storage");
+ free(ren->immediate);
+ DEBUG("Freed Immdraw storage");
+ arena_free_storage(&ren->frame_arena);
+ DEBUG("Freed frame allocator buffer");
+}
+size_t Renderer_GetMemReqs() { return sizeof(Renderer); }
+
+void Render_WindowSizeChanged(GLFWwindow* window, i32 new_width, i32 new_height) {
+ (void)window;
+ INFO("Window size changed callback");
+ // Renderer* ren = Core_GetRenderer(&g_core);
+ GPU_Swapchain_Resize(new_width, new_height);
+}
+
+void Render_FrameBegin(Renderer* ren) {
+ arena_free_all(&ren->frame_arena);
+ ren->frame_aborted = false;
+ if (!GPU_Backend_BeginFrame()) {
+ ren->frame_aborted = true;
+ WARN("Frame aborted");
+ return;
+ }
+}
+void Render_FrameEnd(Renderer* ren) {
+ if (ren->frame_aborted) {
+ return;
+ }
+
+  GPU_Backend_EndFrame();
+}
+void Render_RenderEntities(RenderEnt* entities, size_t entity_count) {
+ Renderer* ren = get_renderer();
+ RenderScene scene = ren->scene;
+
+ // FUTURE: Depth pre-pass
+
+ Shadow_Storage* shadow_storage = Render_GetShadowStorage();
+  shadow_storage->enabled = false;  // TEMP: shadow pass disabled for now
+ TextureHandle sun_shadowmap =
+ shadow_storage->enabled ? Shadow_GetShadowMapTexture(shadow_storage) : INVALID_TEX_HANDLE;
+
+ PBR_Execute(ren->pbr, scene.camera, sun_shadowmap, entities, entity_count);
+}
+
+TextureData TextureDataLoad(const char* path, bool invert_y) {
+ TRACE("Load texture %s", path);
+
+ // load the file data
+ int width, height, num_channels;
+ stbi_set_flip_vertically_on_load(invert_y);
+
+  // stbi_load returns unsigned char*, so no -Wpointer-sign suppression is needed
+  unsigned char* data = stbi_load(path, &width, &height, &num_channels, 0);
+  if (data) {
+    DEBUG("loaded texture: %s", path);
+  } else {
+    WARN("failed to load texture %s", path);
+ }
+
+ // printf("width: %d height: %d num channels: %d\n", width, height, num_channels);
+
+  GPU_TextureFormat format;
+  if (num_channels == 4) {
+    format = TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM;
+  } else {
+    format = TEXTURE_FORMAT_8_8_8_RGB_UNORM;
+  }
+ TextureDesc desc = {
+ .extents = { width, height },
+ .format = format,
+ .num_channels = num_channels,
+ .tex_type = TEXTURE_TYPE_2D,
+ };
+
+ return (TextureData){ .description = desc, .image_data = data };
+}
+
+TextureHandle TextureLoadFromFile(const char* path) {
+ TextureData tex_data = TextureDataLoad(path, false);
+ TextureHandle h = GPU_TextureCreate(tex_data.description, true, tex_data.image_data);
+ return h;
+}
+
+Mesh Mesh_Create(Geometry* geometry, bool free_on_upload) {
+ Mesh m = { 0 };
+
+ // Create and upload vertex buffer
+ size_t vert_bytes = geometry->vertices->len * sizeof(Vertex);
+ INFO("Creating vertex buffer with size %d (%d x %d)", vert_bytes, geometry->vertices->len,
+ sizeof(Vertex));
+ m.vertex_buffer =
+ GPU_BufferCreate(vert_bytes, BUFFER_VERTEX, BUFFER_FLAG_GPU, geometry->vertices->data);
+
+ // Create and upload index buffer
+ if (geometry->has_indices) {
+ size_t index_bytes = geometry->indices->len * sizeof(u32);
+ INFO("Creating index buffer with size %d (len: %d)", index_bytes, geometry->indices->len);
+ m.index_buffer =
+ GPU_BufferCreate(index_bytes, BUFFER_INDEX, BUFFER_FLAG_GPU, geometry->indices->data);
+ }
+
+ m.is_uploaded = true;
+ m.geometry = *geometry; // clone geometry data and store on Mesh struct
+ if (free_on_upload) {
+ Geometry_Destroy(geometry);
+ }
+ return m;
+}
+
+void Geometry_Destroy(Geometry* geometry) {
+ if (geometry->indices) {
+ u32_darray_free(geometry->indices);
+ }
+ if (geometry->vertices) {
+ Vertex_darray_free(geometry->vertices);
+ }
+}
+PUB MeshHandle Mesh_Insert(Mesh* mesh) { return Mesh_pool_insert(Render_GetMeshPool(), mesh); }
+PUB MaterialHandle Material_Insert(Material* material) {
+ return Material_pool_insert(Render_GetMaterialPool(), material);
+}
+Mesh* Mesh_Get(MeshHandle handle) { return Mesh_pool_get(Render_GetMeshPool(), handle); }
+
+void Mesh_DebugPrint(Mesh* mesh) {
+ printf("Mesh %d vertices %d indices %d joints \n", mesh->geometry.vertices->len,
+ mesh->geometry.indices->len);
+}
+
+size_t ModelExtractRenderEnts(RenderEnt_darray* entities, ModelHandle model_handle, Mat4 affine,
+ RenderEntityFlags flags) {
+ Model* model = MODEL_GET(model_handle);
+ for (u32 i = 0; i < model->mesh_count; i++) {
+ Mesh* m = Mesh_pool_get(Render_GetMeshPool(), model->meshes[i]);
+ RenderEnt data = { .mesh = model->meshes[i],
+ .material = m->material,
+ .affine = affine,
+ // .bounding_box
+ .flags = flags };
+ RenderEnt_darray_push(entities, data);
+ }
+ return model->mesh_count; // how many RenderEnts we pushed
+}
+
+void SetCamera(Camera camera) { g_core.renderer->scene.camera = camera; }
+void SetMainLight(DirectionalLight light) { g_core.renderer->scene.sun = light; }
+
+arena* GetRenderFrameArena(Renderer* r) { return &r->frame_arena; }
+
+RenderScene* Render_GetScene() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return &ren->scene;
+}
+
+Shadow_Storage* Render_GetShadowStorage() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return ren->shadows;
+}
+
+Terrain_Storage* Render_GetTerrainStorage() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return ren->terrain;
+}
+
+Grid_Storage* Render_GetGridStorage() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return ren->grid;
+}
+
+Immdraw_Storage* Render_GetImmdrawStorage() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return ren->immediate;
+}
+
+TextureHandle Render_GetWhiteTexture() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return ren->white_1x1;
+}
+
+/** @return an arena allocator that gets cleared at the beginning of every render frame */
+arena* Render_GetFrameArena() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return &ren->frame_arena;
+}
+
+Mesh_pool* Render_GetMeshPool() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return &ren->mesh_pool;
+}
+Material_pool* Render_GetMaterialPool() {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ return &ren->material_pool;
+}
+
+void Render_SetRenderMode(RenderMode mode) {
+ Renderer* ren = Core_GetRenderer(&g_core);
+ ren->render_mode = mode;
+}
diff --git a/archive/src/render/render.h b/archive/src/render/render.h
new file mode 100644
index 0000000..d752f8b
--- /dev/null
+++ b/archive/src/render/render.h
@@ -0,0 +1,151 @@
+/**
+ * @brief Public renderer interface: lifecycle, resources, and drawing.
+ */
+
+#pragma once
+#include "camera.h"
+#include "defines.h"
+#include "grid.h"
+#include "immdraw.h"
+#include "maths_types.h"
+#include "ral_types.h"
+#include "render_types.h"
+#include "shadows.h"
+
+typedef struct Renderer Renderer;
+typedef struct GLFWwindow GLFWwindow;
+typedef struct RendererConfig {
+ const char* window_name;
+ u32 scr_width, scr_height;
+ Vec3 clear_colour;
+} RendererConfig;
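+
+// Example (editor's sketch): a typical config passed to Renderer_Init.
+//   RendererConfig cfg = { .window_name = "demo",
+//                          .scr_width = 1280,
+//                          .scr_height = 720,
+//                          .clear_colour = vec3(0.1, 0.1, 0.1) };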
+
+typedef struct RenderFlags {
+ bool wireframe;
+} RenderFlags;
+
+typedef struct RenderCtx {
+ Mat4 view;
+ Mat4 projection;
+} RenderCtx;
+
+/** @brief Holds globally bound data for rendering a scene. Typically held by the renderer.
+ * Whenever you call draw functions you can think of this as an implicit parameter. */
+typedef struct RenderScene {
+ Camera camera;
+ DirectionalLight sun;
+} RenderScene;
+
+PUB void SetCamera(Camera camera);
+PUB void SetMainLight(DirectionalLight light);
+
+// #define MESH_GET(h) (Mesh_pool_get(g_core.renderer->meshes, h))
+// #define MATERIAL_GET(h) (Material_pool_get(g_core.renderer->material, h))
+
+// --- Lifecycle
+
+PUB bool Renderer_Init(RendererConfig config, Renderer* renderer, GLFWwindow** out_window,
+ GLFWwindow* optional_window);
+PUB void Renderer_Shutdown(Renderer* renderer);
+PUB size_t Renderer_GetMemReqs();
+void Render_WindowSizeChanged(GLFWwindow* window, i32 new_width, i32 new_height);
+
+// internal init functions
+void DefaultPipelinesInit(Renderer* renderer);
+
+// NOTE: All of these functions grab the Renderer instance off the global Core
+PUB void Render_FrameBegin(Renderer* renderer);
+PUB void Render_FrameEnd(Renderer* renderer);
+
+/** @brief Renders the given entities using the globally bound RenderScene camera */
+PUB void Render_RenderEntities(RenderEnt* entities, size_t entity_count);
+
+// TODO: Render_FrameDraw(); - this will
+
+// --- Resources
+
+PUB TextureData TextureDataLoad(const char* path, bool invert_y);
+PUB void TextureUpload(TextureHandle handle, size_t n_bytes, const void* data);
+PUB TextureHandle TextureLoadFromFile(const char* path);
+PUB ModelHandle ModelLoad(const char* debug_name, const char* filepath);
+
+// --- Rendering Data
+
+PUB Mesh Mesh_Create(Geometry* geometry, bool free_on_upload);
+PUB void Mesh_Delete(Mesh* mesh);
+Mesh* Mesh_Get(MeshHandle handle);
+void Geometry_Destroy(Geometry* geometry);
+MeshHandle Mesh_Insert(Mesh* mesh);
+void Mesh_DebugPrint(Mesh* mesh);
+MaterialHandle Material_Insert(Material* material);
+
+/** @brief gets render entities from a model and pushes them into a dynamic array for rendering */
+size_t ModelExtractRenderEnts(RenderEnt_darray* entities, ModelHandle model_handle, Mat4 affine,
+ RenderEntityFlags flags);
+
+// --- Drawing
+
+// NOTE: These functions use the globally bound camera in RenderScene
+PUB void DrawMesh(Mesh* mesh, Material* material, Mat4 model);
+
+/** @brief the renderer does some internal bookkeeping for terrain so we use the terrain
+ stored on the Renderer rather than accept it as a parameter */
+PUB void Render_DrawTerrain();
+
+// --- Getters (not in love with this but I'm finding keeping Renderer internals private to be okay)
+arena* GetRenderFrameArena(Renderer* r);
+
+typedef struct RenderScene RenderScene;
+typedef struct Shadow_Storage Shadow_Storage;
+typedef struct Terrain_Storage Terrain_Storage;
+
+RenderScene* Render_GetScene();
+Shadow_Storage* Render_GetShadowStorage();
+Terrain_Storage* Render_GetTerrainStorage();
+Grid_Storage* Render_GetGridStorage();
+Immdraw_Storage* Render_GetImmdrawStorage();
+TextureHandle Render_GetWhiteTexture();
+arena* Render_GetFrameArena();
+Mesh_pool* Render_GetMeshPool();
+Material_pool* Render_GetMaterialPool();
+
+// --- Setters
+void Render_SetRenderMode(RenderMode mode);
+
+// -------------------------------------------------
+
+// Frame lifecycle on CPU
+
+// 1. extract
+// 2. culling
+// 3. render
+// 4. dispatch (combined with render for now)
+
+// typedef struct Cull_Result {
+// u64 n_visible_objects;
+// u64 n_culled_objects;
+// u32* visible_ent_indices; // allocated on frame arena
+// size_t index_count;
+// } Cull_Result;
+
+// // everything that can be in the world, knows how to extract rendering data
+// typedef void (*ExtractRenderData)(void* world_data);
+
+// typedef struct Renderer Renderer;
+
+// /** @brief Produces a smaller set of only those meshes visible in the camera frustum on the CPU
+// */ Cull_Result Frame_Cull(Renderer* ren, RenderEnt* entities, size_t entity_count, Camera*
+// camera);
+
+// Cull_Result Frame_Cull(Renderer* ren, RenderEnt* entities, size_t entity_count, Camera* camera) {
+// // TODO: u32 chunk_count = Tpool_GetNumWorkers();
+
+// arena* frame_arena = GetRenderFrameArena(ren);
+
+// Cull_Result result = { 0 };
+// result.visible_ent_indices = arena_alloc(
+// frame_arena, sizeof(u32) * entity_count); // make space for if all ents are visible
+
+// assert((result.n_visible_objects + result.n_culled_objects == entity_count));
+// return result;
+// }
diff --git a/archive/src/render/render_types.h b/archive/src/render/render_types.h
new file mode 100644
index 0000000..bdf9849
--- /dev/null
+++ b/archive/src/render/render_types.h
@@ -0,0 +1,138 @@
+/**
+ * @brief Shared data types used across the renderer.
+ */
+
+#pragma once
+#include "animation.h"
+#include "defines.h"
+#include "maths_types.h"
+#include "mem.h"
+#include "ral_types.h"
+
+// --- Handles
+
+#define INVALID_MODEL_HANDLE ((ModelHandle){ .raw = 9999991 })
+#define INVALID_MATERIAL_HANDLE ((MaterialHandle){ .raw = 9999992 })
+#define INVALID_MESH_HANDLE ((MeshHandle){ .raw = 9999993 })
+
+typedef enum RenderMode {
+ RENDER_MODE_DEFAULT,
+ RENDER_MODE_WIREFRAME,
+ RENDER_MODE_WIREFRAME_ON_LIT,
+ RENDER_MODE_COUNT
+} RenderMode;
+
+typedef struct u32_opt {
+ u32 value;
+ bool has_value;
+} u32_opt;
+
+typedef struct Mesh {
+ BufferHandle vertex_buffer;
+ BufferHandle index_buffer;
+  Geometry geometry; // NULL vertices/indices mean the CPU-side copy has been freed
+ MaterialHandle material;
+ bool is_skinned; // false = its static
+ Armature armature;
+ bool is_uploaded; // has the data been uploaded to the GPU
+} Mesh;
+#ifndef TYPED_MESH_CONTAINERS
+KITC_DECL_TYPED_ARRAY(Mesh)
+TYPED_POOL(Mesh, Mesh)
+#define TYPED_MESH_CONTAINERS
+#endif
+
+typedef struct TextureData {
+ TextureDesc description;
+ void* image_data;
+} TextureData;
+
+// --- Supported materials
+typedef enum MaterialKind {
+ MAT_BLINN_PHONG, // NOTE: we're dropping support for this
+ MAT_PBR, // uses textures for PBR properties
+ MAT_PBR_PARAMS, // uses float values to represent a surface uniformly
+ MAT_COUNT
+} MaterialKind;
+static const char* material_kind_names[] = { "Blinn Phong", "PBR (Textures)", "PBR (Params)",
+ "Count (This should be an error)" };
+
+/**
+ * @brief PBR material parameters
+ * @note based on https://google.github.io/filament/Filament.html#materialsystem/standardmodel
+ */
+typedef struct Material {
+ char name[64];
+ MaterialKind kind; // at the moment all materials are PBR materials
+ Vec3 base_colour; // linear RGB {0,0,0} to {1,1,1}
+ f32 metallic;
+ f32 roughness;
+ f32 ambient_occlusion;
+ TextureHandle albedo_map;
+ TextureHandle normal_map;
+ TextureHandle metallic_roughness_map;
+ TextureHandle ambient_occlusion_map;
+} Material;
+
+#ifndef TYPED_MATERIAL_CONTAINERS
+KITC_DECL_TYPED_ARRAY(Material)
+TYPED_POOL(Material, Material)
+#define TYPED_MATERIAL_CONTAINERS
+#endif
+
+/** @brief Convenient wrapper around a number of meshes each with a material */
+typedef struct Model {
+ Str8 name;
+ MeshHandle* meshes;
+ size_t mesh_count;
+ MaterialHandle* materials;
+ size_t material_count;
+ arena anim_arena;
+ AnimationClip_darray* animations;
+} Model;
+#ifndef TYPED_MODEL_ARRAY
+KITC_DECL_TYPED_ARRAY(Model)
+#define TYPED_MODEL_ARRAY
+#endif
+
+// TODO: function to create a model from a single mesh (like when using primitives)
+
+// --- Lights
+typedef struct PointLight {
+ Vec3 position;
+ f32 constant, linear, quadratic;
+ Vec3 ambient;
+ Vec3 diffuse;
+ Vec3 specular;
+} PointLight;
+
+typedef struct DirectionalLight {
+ Vec3 direction;
+ Vec3 ambient;
+ Vec3 diffuse;
+ Vec3 specular;
+} DirectionalLight;
+
+// ---
+
+typedef enum RenderEntityFlag {
+ REND_ENT_CASTS_SHADOWS = 1 << 0,
+ REND_ENT_VISIBLE = 1 << 1,
+} RenderEntityFlag;
+typedef u32 RenderEntityFlags;
+
+/** @brief A renderable 'thing' */
+typedef struct RenderEnt {
+ MeshHandle mesh;
+ MaterialHandle material;
+ /** If NULL, no armature and the mesh is static geometry, else it is to be skinned */
+ Armature* armature;
+ Mat4 affine; // In the future this should be updated by the transform graph
+ Bbox_3D bounding_box;
+ RenderEntityFlags flags;
+} RenderEnt;
+
+#ifndef TYPED_RENDERENT_ARRAY
+KITC_DECL_TYPED_ARRAY(RenderEnt)
+#define TYPED_RENDERENT_ARRAY
+#endif
diff --git a/archive/src/render/shader_layouts.h b/archive/src/render/shader_layouts.h
new file mode 100644
index 0000000..ef94c89
--- /dev/null
+++ b/archive/src/render/shader_layouts.h
@@ -0,0 +1,70 @@
+#pragma once
+#include "maths_types.h"
+#include "ral_types.h"
+
+/** @brief shader layout for camera matrices */
+typedef struct Binding_Camera {
+ Mat4 view;
+ Mat4 projection;
+ Vec4 viewPos;
+} Binding_Camera;
+
+typedef struct Binding_Model {
+ Mat4 model;
+} Binding_Model;
+
+/** @brief data that is handy to have in any shader
+ *  @note currently empty; an empty struct is a GCC extension in C, so add a
+ *  member before relying on this in standard-conforming code */
+typedef struct Binding_Globals {
+} Binding_Globals;
+
+typedef struct pbr_point_light {
+ Vec3 pos;
+ f32 pad;
+ Vec3 color;
+ f32 pad2;
+} pbr_point_light;
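+
+// NOTE (editor): the explicit pad floats keep each Vec3 on a 16-byte boundary,
+// which is what std140-style uniform block packing expects; this is assumed
+// from the layout, since the GLSL side isn't shown here.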
+
+typedef struct Binding_Lights {
+ pbr_point_light pointLights[4];
+} Binding_Lights;
+
+static ShaderDataLayout Binding_Camera_GetLayout(void* data) {
+ Binding_Camera* d = data;
+ bool has_data = data != NULL;
+
+ ShaderBinding b1 = { .label = "Camera",
+ .kind = BINDING_BYTES,
+ .data.bytes = { .size = sizeof(Binding_Camera) } };
+ if (has_data) {
+ b1.data.bytes.data = d;
+ }
+ return (ShaderDataLayout){ .bindings = { b1 }, .binding_count = 1 };
+}
+
+static ShaderDataLayout Binding_Model_GetLayout(void* data) {
+ Binding_Model* d = data;
+ bool has_data = data != NULL;
+
+ ShaderBinding b1 = { .label = "Model",
+ .kind = BINDING_BYTES,
+ .vis = VISIBILITY_VERTEX,
+ .data.bytes = { .size = sizeof(Binding_Model) } };
+ if (has_data) {
+ b1.data.bytes.data = d;
+ }
+ return (ShaderDataLayout){ .bindings = { b1 }, .binding_count = 1 };
+}
+
+static ShaderDataLayout Binding_Lights_GetLayout(void* data) {
+ Binding_Lights* d = data;
+ bool has_data = data != NULL;
+
+ ShaderBinding b1 = { .label = "Lights",
+ .kind = BINDING_BYTES,
+ .vis = VISIBILITY_FRAGMENT,
+ .data.bytes = { .size = sizeof(Binding_Lights) } };
+ if (has_data) {
+ b1.data.bytes.data = d;
+ }
+ return (ShaderDataLayout){ .bindings = { b1 }, .binding_count = 1 };
+}
diff --git a/archive/src/render/shadows.c b/archive/src/render/shadows.c
new file mode 100644
index 0000000..029eefb
--- /dev/null
+++ b/archive/src/render/shadows.c
@@ -0,0 +1,211 @@
+#include "shadows.h"
+#include <string.h>
+#include "file.h"
+#include "glad/glad.h"
+#include "log.h"
+#include "maths.h"
+#include "maths_types.h"
+#include "primitives.h"
+#include "ral_common.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+#include "render.h"
+#include "render_types.h"
+#include "str.h"
+
+ShaderDataLayout ShadowUniforms_GetLayout(void* data) {
+ ShadowUniforms* d = (ShadowUniforms*)data;
+ bool has_data = data != NULL;
+
+ ShaderBinding b1 = {
+ .label = "ShadowUniforms",
+ .kind = BINDING_BYTES,
+ .vis = VISIBILITY_VERTEX,
+ .data = { .bytes = { .size = sizeof(ShadowUniforms) } }
+ // TODO: split this into two bindings so we can update model matrix independently
+ };
+
+ if (has_data) {
+ b1.data.bytes.data = data;
+ }
+
+ return (ShaderDataLayout){ .binding_count = 1, .bindings = { b1 } };
+}
+
+ShaderDataLayout ShadowDebugQuad_GetLayout(void* data) {
+ TextureHandle* handle = data;
+ bool has_data = data != NULL;
+
+ ShaderBinding b1 = {
+ .label = "depthMap",
+ .kind = BINDING_TEXTURE,
+ .vis = VISIBILITY_FRAGMENT,
+ };
+
+ if (has_data) {
+ b1.data.texture.handle = *handle;
+ }
+
+ return (ShaderDataLayout){ .binding_count = 1, .bindings = { b1 } };
+}
+
+void Shadow_Init(Shadow_Storage* storage, u32 shadowmap_width, u32 shadowmap_height) {
+ memset(storage, 0, sizeof(Shadow_Storage));
+ arena scratch = arena_create(malloc(1024 * 1024), 1024 * 1024);
+
+ TextureDesc depthmap_desc = { .extents = u32x2(shadowmap_width, shadowmap_height),
+ .format = TEXTURE_FORMAT_DEPTH_DEFAULT,
+ .tex_type = TEXTURE_TYPE_2D };
+ DEBUG("Creating depth map texture for shadows");
+ TextureHandle depthmap = GPU_TextureCreate(depthmap_desc, false, NULL);
+ storage->depth_texture = depthmap;
+
+ // -- shadowmap drawing pass
+ GPU_RenderpassDesc rpass_desc = { .default_framebuffer = false,
+ .has_color_target = false,
+ .has_depth_stencil = true,
+ .depth_stencil = depthmap };
+
+ storage->shadowmap_pass = GPU_Renderpass_Create(rpass_desc);
+
+ WARN("About to laod shaders");
+ WARN("Shader paths: %s %s", "assets/shaders/shadows.vert", "assets/shaders/shadows.frag");
+ Str8 vert_path = str8("assets/shaders/shadows.vert");
+ Str8 frag_path = str8("assets/shaders/shadows.frag");
+ str8_opt vertex_shader = str8_from_file(&scratch, vert_path);
+ str8_opt fragment_shader = str8_from_file(&scratch, frag_path);
+ if (!vertex_shader.has_value || !fragment_shader.has_value) {
+ ERROR_EXIT("Failed to load shaders from disk");
+ }
+
+ ShaderDataLayout uniforms = ShadowUniforms_GetLayout(NULL);
+
+ GraphicsPipelineDesc pipeline_desc = {
+ .debug_name = "Shadows Pipeline",
+ .vertex_desc = static_3d_vertex_description(),
+ .data_layouts = { uniforms },
+ .data_layouts_count = 1,
+ .vs = { .debug_name = "Shadows Vert shader",
+ .filepath = vert_path,
+ .code = vertex_shader.contents,
+ .is_spirv = false },
+ .fs = { .debug_name = "Shadows Frag shader",
+ .filepath = frag_path,
+ .code = fragment_shader.contents,
+ .is_spirv = false },
+ };
+ storage->shadowmap_pipeline = GPU_GraphicsPipeline_Create(pipeline_desc, storage->shadowmap_pass);
+
+ // -- debug quad pipeline
+ GPU_RenderpassDesc debug_pass_desc = { .default_framebuffer = true };
+ storage->debugquad_pass = GPU_Renderpass_Create(debug_pass_desc);
+
+ vert_path = str8("assets/shaders/debug_quad.vert");
+ frag_path = str8("assets/shaders/debug_quad.frag");
+ vertex_shader = str8_from_file(&scratch, vert_path);
+ fragment_shader = str8_from_file(&scratch, frag_path);
+ if (!vertex_shader.has_value || !fragment_shader.has_value) {
+ ERROR_EXIT("Failed to load shaders from disk");
+ }
+
+ ShaderDataLayout debugquad_uniforms = ShadowDebugQuad_GetLayout(NULL);
+
+ GraphicsPipelineDesc debugquad_pipeline_desc = {
+ .debug_name = "Shadows debug quad Pipeline",
+ .vertex_desc = static_3d_vertex_description(),
+ .data_layouts = { debugquad_uniforms },
+ .data_layouts_count = 1,
+ .vs = { .debug_name = "depth debug quad vert shader",
+ .filepath = vert_path,
+ .code = vertex_shader.contents,
+ .is_spirv = false },
+ .fs = { .debug_name = "depth debug quad frag shader",
+ .filepath = frag_path,
+ .code = fragment_shader.contents,
+ .is_spirv = false },
+ };
+ storage->debugquad_pipeline =
+ GPU_GraphicsPipeline_Create(debugquad_pipeline_desc, storage->debugquad_pass);
+
+ Geometry quad_geo = Geo_CreatePlane(f32x2(1, 1), 1, 1);
+ // HACK: Swap vertices to make it face us
+ Vertex top0 = quad_geo.vertices->data[0];
+ quad_geo.vertices->data[0] = quad_geo.vertices->data[2];
+ quad_geo.vertices->data[2] = top0;
+ Vertex top1 = quad_geo.vertices->data[1];
+ quad_geo.vertices->data[1] = quad_geo.vertices->data[3];
+ quad_geo.vertices->data[3] = top1;
+ storage->quad = Mesh_Create(&quad_geo, false);
+
+ arena_free_storage(&scratch);
+}
+
+void Shadow_Run(RenderEnt* entities, size_t entity_count) {
+ Shadow_Storage* shadow_storage = Render_GetShadowStorage();
+
+ // calculations
+ RenderScene* render_scene = Render_GetScene();
+ f32 near_plane = 1.0, far_plane = 10.0;
+ // -- Not sure about how we want to handle lights
+ Vec3 light_position = { 1, 4, -1 };
+ // --
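+  // a directional light uses an orthographic projection, since its rays are parallel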
+ Mat4 light_projection = mat4_orthographic(-10.0, 10.0, -10.0, 10.0, near_plane, far_plane);
+ Mat4 light_view = mat4_look_at(light_position, VEC3_ZERO, VEC3_Y);
+ Mat4 light_space_matrix = mat4_mult(light_view, light_projection);
+
+ Shadow_ShadowmapExecute(shadow_storage, light_space_matrix, entities, entity_count);
+}
+
+void Shadow_DrawDebugQuad() {
+ Shadow_Storage* shadow_storage = Render_GetShadowStorage();
+
+ GPU_CmdEncoder* enc = GPU_GetDefaultEncoder();
+ GPU_CmdEncoder_BeginRender(enc, shadow_storage->debugquad_pass);
+
+ GPU_EncodeBindPipeline(enc, shadow_storage->debugquad_pipeline);
+ ShaderDataLayout quad_data = ShadowDebugQuad_GetLayout(&shadow_storage->depth_texture);
+ GPU_EncodeBindShaderData(enc, 0, quad_data);
+ GPU_EncodeSetVertexBuffer(enc, shadow_storage->quad.vertex_buffer);
+ GPU_EncodeSetIndexBuffer(enc, shadow_storage->quad.index_buffer);
+ GPU_EncodeDrawIndexedTris(enc, shadow_storage->quad.geometry.indices->len);
+
+ GPU_CmdEncoder_EndRender(enc);
+}
+
+void Shadow_ShadowmapExecute(Shadow_Storage* storage, Mat4 light_space_transform,
+ RenderEnt* entities, size_t entity_count) {
+ GPU_CmdEncoder shadow_encoder = GPU_CmdEncoder_Create();
+
+ GPU_CmdEncoder_BeginRender(&shadow_encoder, storage->shadowmap_pass);
+ // DEBUG("Begin shadowmap renderpass");
+
+ // FIXME: shouldnt be gl specific
+ glClear(GL_DEPTH_BUFFER_BIT);
+
+ GPU_EncodeBindPipeline(&shadow_encoder, storage->shadowmap_pipeline);
+
+ ShadowUniforms uniforms = {
+ .light_space = light_space_transform,
+ .model = mat4_ident() // this will be overwritten for each Model
+ };
+ ShaderDataLayout shader_data = ShadowUniforms_GetLayout(&uniforms);
+
+ for (size_t ent_i = 0; ent_i < entity_count; ent_i++) {
+ RenderEnt renderable = entities[ent_i];
+    if (renderable.flags & REND_ENT_CASTS_SHADOWS) {  // bitwise: test the flag, not truthiness
+ // Model* model = MODEL_GET(renderable.model);
+
+ uniforms.model = renderable.affine; // update the model transform
+
+ Mesh* mesh = Mesh_pool_get(Render_GetMeshPool(), renderable.mesh);
+ GPU_EncodeBindShaderData(&shadow_encoder, 0, shader_data);
+ GPU_EncodeSetVertexBuffer(&shadow_encoder, mesh->vertex_buffer);
+ GPU_EncodeSetIndexBuffer(&shadow_encoder, mesh->index_buffer);
+ GPU_EncodeDrawIndexedTris(&shadow_encoder, mesh->geometry.indices->len);
+ }
+ }
+
+ GPU_CmdEncoder_EndRender(&shadow_encoder); // end renderpass
+}
+
+TextureHandle Shadow_GetShadowMapTexture(Shadow_Storage* storage) { return storage->depth_texture; }
diff --git a/archive/src/render/shadows.h b/archive/src/render/shadows.h
new file mode 100644
index 0000000..0482d10
--- /dev/null
+++ b/archive/src/render/shadows.h
@@ -0,0 +1,48 @@
+/**
+ * @brief Functions for adding shadows to scene rendering.
+ */
+
+#pragma once
+#include "defines.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+#include "render_types.h"
+
+typedef struct Shadow_Storage {
+ bool enabled;
+ GPU_Renderpass* shadowmap_pass;
+ GPU_Pipeline* shadowmap_pipeline;
+ TextureHandle depth_texture;
+ bool debug_quad_enabled;
+ Mesh quad;
+ GPU_Renderpass* debugquad_pass;
+ GPU_Pipeline* debugquad_pipeline;
+ // TODO: Some statistics tracking
+} Shadow_Storage;
+
+typedef struct ShadowUniforms {
+ Mat4 light_space;
+ Mat4 model;
+} ShadowUniforms;
+
+typedef struct Camera Camera;
+typedef struct Mat4 Mat4;
+
+// --- Public API
+PUB void Shadow_Init(Shadow_Storage* storage, u32 shadowmap_width, u32 shadowmap_height);
+
+/** @brief Run shadow map generation for given entities, and store in a texture.
+ * @note Uses active directional light for now */
+PUB void Shadow_Run(RenderEnt* entities, size_t entity_count);
+
+PUB void Shadow_DrawDebugQuad();
+
+/** @brief Get the shadow texture generated from shadowmap pass */
+PUB TextureHandle Shadow_GetShadowMapTexture(Shadow_Storage* storage);
+
+// --- Internal
+GPU_Renderpass* Shadow_RPassCreate(); // Creates the render pass
+GPU_Pipeline* Shadow_PipelineCreate(GPU_Renderpass* rpass); // Creates the pipeline
+void Shadow_ShadowmapExecute(Shadow_Storage* storage, Mat4 light_space_transform,
+ RenderEnt* entities, size_t entity_count);
diff --git a/archive/src/render/skybox.c b/archive/src/render/skybox.c
new file mode 100644
index 0000000..b4e1e42
--- /dev/null
+++ b/archive/src/render/skybox.c
@@ -0,0 +1,161 @@
+#include "skybox.h"
+#include <assert.h>
+#include "file.h"
+#include "glad/glad.h"
+#include "log.h"
+#include "maths.h"
+#include "primitives.h"
+#include "ral_common.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+#include "render.h"
+#include "render_types.h"
+#include "shader_layouts.h"
+
+float skyboxVertices[] = {
+ // positions
+ -1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -1.0f, 1.0f, -1.0f, -1.0f,
+ 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, -1.0f, 1.0f, -1.0f,
+
+ -1.0f, -1.0f, 1.0f, -1.0f, -1.0f, -1.0f, -1.0f, 1.0f, -1.0f,
+ -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, -1.0f, 1.0f,
+
+ 1.0f, -1.0f, -1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, -1.0f, 1.0f, -1.0f, -1.0f,
+
+ -1.0f, -1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f, 1.0f, -1.0f, 1.0f, -1.0f, -1.0f, 1.0f,
+
+ -1.0f, 1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, 1.0f,
+ 1.0f, 1.0f, 1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f, -1.0f,
+
+ -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, -1.0f,
+ 1.0f, -1.0f, -1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f
+};
+
+static const char* faces[6] = { "assets/demo/skybox/right.jpg", "assets/demo/skybox/left.jpg",
+ "assets/demo/skybox/top.jpg", "assets/demo/skybox/bottom.jpg",
+ "assets/demo/skybox/front.jpg", "assets/demo/skybox/back.jpg" };
+
+Skybox Skybox_Create(const char** face_paths, int n) {
+ INFO("Creating a skybox");
+  CASSERT_MSG(n == 6, "We only support full cubemaps for now");
+
+ // -- cube verts
+  Geometry geom = { .format = VERTEX_POS_ONLY, // doesn't matter
+ .has_indices = false,
+ .indices = NULL,
+ .vertices = Vertex_darray_new(36) };
+ for (u32 i = 0; i < (36 * 3); i += 3) {
+ Vertex_darray_push(
+ geom.vertices,
+ (Vertex){ .pos_only = { .position = vec3(skyboxVertices[i], skyboxVertices[i + 1],
+ skyboxVertices[i + 2]) } });
+ }
+ Mesh cube = Mesh_Create(&geom, false);
+
+ // -- cubemap texture
+ TextureHandle handle;
+ GPU_Texture* tex = GPU_TextureAlloc(&handle);
+ glBindTexture(GL_TEXTURE_CUBE_MAP, tex->id);
+
+  for (int i = 0; i < n; i++) {
+ TextureData data = TextureDataLoad(face_paths[i], false);
+ assert(data.description.format == TEXTURE_FORMAT_8_8_8_RGB_UNORM);
+ glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, data.description.extents.x,
+ data.description.extents.y, 0, GL_RGB, GL_UNSIGNED_BYTE, data.image_data);
+ }
+ glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
+
+ // shader pipeline
+ GPU_RenderpassDesc rpass_desc = {
+ .default_framebuffer = true,
+ };
+ GPU_Renderpass* pass = GPU_Renderpass_Create(rpass_desc);
+
+ arena scratch = arena_create(malloc(1024 * 1024), 1024 * 1024);
+
+ Str8 vert_path = str8("assets/shaders/skybox.vert");
+ Str8 frag_path = str8("assets/shaders/skybox.frag");
+ str8_opt vertex_shader = str8_from_file(&scratch, vert_path);
+ str8_opt fragment_shader = str8_from_file(&scratch, frag_path);
+ if (!vertex_shader.has_value || !fragment_shader.has_value) {
+ ERROR_EXIT("Failed to load shaders from disk")
+ }
+
+ ShaderDataLayout camera_data = Binding_Camera_GetLayout(NULL);
+ ShaderDataLayout shader_data = Skybox_GetLayout(NULL);
+
+ VertexDescription builder = { .debug_label = "pos only" };
+ VertexDesc_AddAttr(&builder, "inPosition", ATTR_F32x3);
+ builder.use_full_vertex_size = true;
+
+ GraphicsPipelineDesc pipeline_desc = {
+ .debug_name = "Skybox pipeline",
+ .vertex_desc = builder,
+ .data_layouts = { shader_data, camera_data },
+ .data_layouts_count = 2,
+ .vs = { .debug_name = "Skybox Vertex Shader",
+ .filepath = vert_path,
+ .code = vertex_shader.contents },
+ .fs = { .debug_name = "Skybox Fragment Shader",
+ .filepath = frag_path,
+ .code = fragment_shader.contents },
+ .wireframe = false,
+ .depth_test = true,
+ };
+
+ GPU_Pipeline* pipeline = GPU_GraphicsPipeline_Create(pipeline_desc, pass);
+
+ return (Skybox){ .cube = cube, .texture = handle, .pipeline = pipeline };
+}
+
+Skybox Skybox_Default() { return Skybox_Create(faces, 6); }
+
+void Skybox_Draw(Skybox* skybox, Camera camera) {
+ GPU_CmdEncoder* enc = GPU_GetDefaultEncoder();
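+  // LEQUAL instead of the default LESS so the skybox, drawn at the far plane
+  // (depth equal to the cleared 1.0), still passes the depth test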
+ glDepthFunc(GL_LEQUAL);
+ GPU_CmdEncoder_BeginRender(enc, skybox->pipeline->renderpass);
+ GPU_EncodeBindPipeline(enc, skybox->pipeline);
+ GPU_EncodeSetDefaults(enc);
+
+ // Shader data
+
+ Mat4 view, proj;
+ u32x2 dimensions = GPU_Swapchain_GetDimensions();
+ Camera_ViewProj(&camera, dimensions.x, dimensions.y, &view, &proj);
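+  // copy only the rotation 3x3 of the view matrix; dropping the translation
+  // keeps the skybox centred on the camera no matter where it moves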
+ Mat4 new = mat4_ident();
+ new.data[0] = view.data[0];
+ new.data[1] = view.data[1];
+ new.data[2] = view.data[2];
+ new.data[4] = view.data[4];
+ new.data[5] = view.data[5];
+ new.data[6] = view.data[6];
+ new.data[8] = view.data[8];
+ new.data[9] = view.data[9];
+ new.data[10] = view.data[10];
+
+ Binding_Camera camera_data = { .view = new,
+ .projection = proj,
+ .viewPos = vec4(camera.position.x, camera.position.y,
+ camera.position.z, 1.0) };
+  // slot 1: camera is the second entry in this pipeline's data_layouts
+  GPU_EncodeBindShaderData(enc, 1, Binding_Camera_GetLayout(&camera_data));
+
+ SkyboxUniforms uniforms = { .cubemap = skybox->texture };
+ ShaderDataLayout skybox_data = Skybox_GetLayout(&uniforms);
+ GPU_EncodeBindShaderData(enc, 0, skybox_data);
+
+  GPU_EncodeSetVertexBuffer(enc, skybox->cube.vertex_buffer);
+  // the cube geometry is non-indexed, so no index buffer is bound
+
+  GPU_EncodeDrawTris(enc, 36);
+
+ GPU_CmdEncoder_EndRender(enc);
+ glDepthFunc(GL_LESS);
+}
diff --git a/archive/src/render/skybox.h b/archive/src/render/skybox.h
new file mode 100644
index 0000000..c2ef3a2
--- /dev/null
+++ b/archive/src/render/skybox.h
@@ -0,0 +1,41 @@
+/**
+ * @brief Cubemap skybox rendering.
+ */
+
+#pragma once
+#include "camera.h"
+#include "defines.h"
+#include "ral_impl.h"
+#include "render_types.h"
+
+typedef struct Skybox {
+ Mesh cube;
+ TextureHandle texture;
+ GPU_Pipeline* pipeline; // "shader"
+} Skybox;
+
+PUB Skybox Skybox_Create(const char** face_paths, int n); // should always pass n = 6 for now
+
+PUB void Skybox_Draw(Skybox* skybox, Camera camera);
+
+typedef struct SkyboxUniforms {
+ TextureHandle cubemap;
+} SkyboxUniforms;
+
+static ShaderDataLayout Skybox_GetLayout(void* data) {
+ SkyboxUniforms* d = (SkyboxUniforms*)data; // cold cast
+ bool has_data = data != NULL;
+
+ ShaderBinding b1 = {
+ .label = "cubeMap",
+ .vis = VISIBILITY_FRAGMENT,
+ .kind = BINDING_TEXTURE,
+ };
+
+ if (has_data) {
+ b1.data.texture.handle = d->cubemap;
+ }
+ return (ShaderDataLayout){ .bindings = { b1 }, .binding_count = 1 };
+}
+
+Skybox Skybox_Default(); \ No newline at end of file