Diffstat (limited to 'archive/src/ral')
-rw-r--r--  archive/src/ral/README.md                            5
-rw-r--r--  archive/src/ral/backends/metal/backend_metal.h       0
-rw-r--r--  archive/src/ral/backends/opengl/backend_opengl.c   449
-rw-r--r--  archive/src/ral/backends/opengl/backend_opengl.h   109
-rw-r--r--  archive/src/ral/backends/opengl/opengl_helpers.h   159
-rw-r--r--  archive/src/ral/backends/vulkan/backend_vulkan.c     0
-rw-r--r--  archive/src/ral/backends/vulkan/backend_vulkan.h    44
-rw-r--r--  archive/src/ral/backends/vulkan/vulkan_glossary.md   18
-rw-r--r--  archive/src/ral/backends/vulkan/vulkan_helpers.h    199
-rw-r--r--  archive/src/ral/ral.h                                 5
-rw-r--r--  archive/src/ral/ral_common.c                         70
-rw-r--r--  archive/src/ral/ral_common.h                         61
-rw-r--r--  archive/src/ral/ral_impl.h                          102
-rw-r--r--  archive/src/ral/ral_types.h                         168
14 files changed, 1389 insertions(+), 0 deletions(-)
diff --git a/archive/src/ral/README.md b/archive/src/ral/README.md
new file mode 100644
index 0000000..f66b95a
--- /dev/null
+++ b/archive/src/ral/README.md
@@ -0,0 +1,5 @@
+# RAL
+
+**Render Abstraction Layer** is a thin abstraction over graphics APIs. Everything in `render` builds on top of the code in
+this folder so that it can stay API-agnostic. It also makes graphics code easier to write by smoothing over discrepancies
+between APIs, such as texture/buffer creation and updating shader values. \ No newline at end of file
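The claim above is easiest to see at a call site: the same RAL calls compile against whichever backend is selected, since callers only see the declarations in `ral_impl.h`. A minimal sketch (the vertex data and sizes are illustrative):

```c
#include "ral.h"

void upload_triangle(void) {
  f32 verts[] = { -0.5f, -0.5f, 0.0f, 0.5f, -0.5f, 0.0f, 0.0f, 0.5f, 0.0f };
  // One call covers allocation plus the optional initial data upload,
  // regardless of which backend is compiled in.
  BufferHandle vbo = GPU_BufferCreate(sizeof(verts), BUFFER_VERTEX, BUFFER_FLAG_GPU, verts);
  // ... encode draws that reference vbo, then:
  GPU_BufferDestroy(vbo);
}
```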
diff --git a/archive/src/ral/backends/metal/backend_metal.h b/archive/src/ral/backends/metal/backend_metal.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/archive/src/ral/backends/metal/backend_metal.h
diff --git a/archive/src/ral/backends/opengl/backend_opengl.c b/archive/src/ral/backends/opengl/backend_opengl.c
new file mode 100644
index 0000000..613d7e1
--- /dev/null
+++ b/archive/src/ral/backends/opengl/backend_opengl.c
@@ -0,0 +1,449 @@
+#include "backend_opengl.h"
+#include "colours.h"
+#include "maths_types.h"
+#if defined(CEL_REND_BACKEND_OPENGL)
+#include <assert.h>
+#include "log.h"
+#include "mem.h"
+#include "opengl_helpers.h"
+#include "ral_common.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+
+#include <glad/glad.h>
+#include <glfw3.h>
+
+typedef struct OpenglCtx {
+ GLFWwindow* window;
+ arena pool_arena;
+ GPU_Swapchain swapchain;
+ GPU_CmdEncoder main_encoder;
+ GPU_BackendPools gpu_pools;
+ ResourcePools* resource_pools;
+} OpenglCtx;
+
+static OpenglCtx context;
+
+bool GPU_Backend_Init(const char* window_name, struct GLFWwindow* window,
+ struct ResourcePools* res_pools) {
+ INFO("loading OpenGL backend");
+
+ memset(&context, 0, sizeof(context));
+ context.window = window;
+
+ size_t pool_buffer_size = 1024 * 1024;
+ context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size);
+
+ BackendPools_Init(&context.pool_arena, &context.gpu_pools);
+ context.resource_pools = res_pools;
+
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
+ glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
+ glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
+ glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
+
+ // glad: load all opengl function pointers
+ if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
+    ERROR("Failed to initialise GLAD");
+ return false;
+ }
+
+ glEnable(GL_DEPTH_TEST);
+ glEnable(GL_CULL_FACE);
+
+ context.swapchain = (GPU_Swapchain){ .dimensions = u32x2(1000, 1000) };
+
+ return true;
+}
+
+// All of these are no-ops in OpenGL
+void GPU_Backend_Shutdown() { /* TODO */ }
+bool GPU_Device_Create(GPU_Device* out_device) { return true; }
+void GPU_Device_Destroy(GPU_Device* device) {}
+bool GPU_Swapchain_Create(GPU_Swapchain* out_swapchain) { return true; }
+void GPU_Swapchain_Destroy(GPU_Swapchain* swapchain) {}
+void GPU_CmdEncoder_Destroy(GPU_CmdEncoder* encoder) {}
+
+void GPU_CmdEncoder_BeginRender(GPU_CmdEncoder* encoder, GPU_Renderpass* renderpass) {
+ glBindFramebuffer(GL_FRAMEBUFFER, renderpass->fbo);
+ // rgba clear_colour = STONE_800;
+ // glClearColor(clear_colour.r, clear_colour.g, clear_colour.b, 1.0f);
+ // if (renderpass->description.has_depth_stencil) {
+ // glClear(GL_DEPTH_BUFFER_BIT);
+ // } else {
+ // glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ // }
+}
+
+void GPU_CmdEncoder_EndRender(GPU_CmdEncoder* encoder) { glBindFramebuffer(GL_FRAMEBUFFER, 0); }
+
+GPU_CmdEncoder* GPU_GetDefaultEncoder() { return &context.main_encoder; }
+void GPU_QueueSubmit(GPU_CmdBuffer* cmd_buffer) {}
+
+void GPU_Swapchain_Resize(i32 new_width, i32 new_height) {
+ context.swapchain.dimensions = u32x2(new_width, new_height);
+}
+
+u32x2 GPU_Swapchain_GetDimensions() { return context.swapchain.dimensions; }
+
+GPU_Renderpass* GPU_Renderpass_Create(GPU_RenderpassDesc description) {
+ // allocate new pass
+ GPU_Renderpass* renderpass = Renderpass_pool_alloc(&context.gpu_pools.renderpasses, NULL);
+ renderpass->description = description;
+
+ if (!description.default_framebuffer) {
+ // If we're not using the default framebuffer we need to generate a new one
+ GLuint gl_fbo_id;
+ glGenFramebuffers(1, &gl_fbo_id);
+ renderpass->fbo = gl_fbo_id;
+ } else {
+ renderpass->fbo = OPENGL_DEFAULT_FRAMEBUFFER;
+ assert(!description.has_color_target);
+ assert(!description.has_depth_stencil);
+ }
+ glBindFramebuffer(GL_FRAMEBUFFER, renderpass->fbo);
+
+ if (description.has_color_target && !description.default_framebuffer) {
+ GPU_Texture* colour_attachment = TEXTURE_GET(description.color_target);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
+ colour_attachment->id, 0);
+ }
+ if (description.has_depth_stencil && !description.default_framebuffer) {
+ GPU_Texture* depth_attachment = TEXTURE_GET(description.depth_stencil);
+ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_attachment->id,
+ 0);
+ }
+
+ if (description.has_depth_stencil && !description.has_color_target) {
+ glDrawBuffer(GL_NONE);
+ glReadBuffer(GL_NONE);
+ }
+
+ glBindFramebuffer(GL_FRAMEBUFFER, 0); // reset to default framebuffer
+
+ return renderpass;
+}
+
+void GPU_Renderpass_Destroy(GPU_Renderpass* pass) { glDeleteFramebuffers(1, &pass->fbo); }
+
+GPU_Pipeline* GPU_GraphicsPipeline_Create(GraphicsPipelineDesc description,
+ GPU_Renderpass* renderpass) {
+ GPU_Pipeline* pipeline = Pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL);
+
+ // Create shader program
+ u32 shader_id = shader_create_separate(description.vs.filepath.buf, description.fs.filepath.buf);
+ pipeline->shader_id = shader_id;
+
+ // Vertex format
+ pipeline->vertex_desc = description.vertex_desc;
+
+ // Allocate uniform buffers if needed
+ u32 ubo_count = 0;
+ // printf("data layouts %d\n", description.data_layouts_count);
+ for (u32 layout_i = 0; layout_i < description.data_layouts_count; layout_i++) {
+ ShaderDataLayout sdl = description.data_layouts[layout_i];
+      TRACE("Shader data layout %d has %d bindings", layout_i, sdl.binding_count);
+
+ for (u32 binding_j = 0; binding_j < sdl.binding_count; binding_j++) {
+ u32 binding_id = binding_j;
+ assert(binding_id < MAX_PIPELINE_UNIFORM_BUFFERS);
+ ShaderBinding binding = sdl.bindings[binding_j];
+ // Do I want Buffer vs Bytes?
+ if (binding.kind == BINDING_BYTES) {
+ static u32 s_binding_point = 0;
+ BufferHandle ubo_handle = GPU_BufferCreate(binding.data.bytes.size, BUFFER_UNIFORM,
+ BUFFER_FLAG_GPU, NULL); // no data right now
+ pipeline->uniform_bindings[ubo_count++] = ubo_handle;
+ GPU_Buffer* ubo_buf = BUFFER_GET(ubo_handle);
+
+        i32 blockIndex = glGetUniformBlockIndex(pipeline->shader_id, binding.label);
+        if (blockIndex < 0) {
+          WARN("Couldn't retrieve block index for uniform block '%s'", binding.label);
+        } else {
+          i32 blocksize;
+          glGetActiveUniformBlockiv(pipeline->shader_id, blockIndex, GL_UNIFORM_BLOCK_DATA_SIZE,
+                                    &blocksize);
+          DEBUG("Block index for %s: %d (%d bytes)", binding.label, blockIndex, blocksize);
+          glUniformBlockBinding(pipeline->shader_id, blockIndex, s_binding_point);
+        }
+
+        glBindBufferBase(GL_UNIFORM_BUFFER, s_binding_point, ubo_buf->id.ubo);
+        ubo_buf->ubo_binding_point = s_binding_point++;
+        ubo_buf->name = binding.label;
+        // GL_MAX_UNIFORM_BUFFER_BINDINGS is a glGetIntegerv pname, not the
+        // limit itself, so query the implementation limit before asserting.
+        GLint max_ubo_bindings = 0;
+        glGetIntegerv(GL_MAX_UNIFORM_BUFFER_BINDINGS, &max_ubo_bindings);
+        assert(s_binding_point <= (u32)max_ubo_bindings);
+ }
+ }
+ }
+ pipeline->uniform_count = ubo_count;
+
+ pipeline->renderpass = renderpass;
+ pipeline->wireframe = description.wireframe;
+
+ return pipeline;
+}
+
+void GraphicsPipeline_Destroy(GPU_Pipeline* pipeline) {}
+
+GPU_CmdEncoder GPU_CmdEncoder_Create() {
+ GPU_CmdEncoder encoder = { 0 };
+ return encoder;
+}
+
+BufferHandle GPU_BufferCreate(u64 size, GPU_BufferType buf_type, GPU_BufferFlags flags,
+ const void* data) {
+ // "allocating" the cpu-side buffer struct
+ BufferHandle handle;
+ GPU_Buffer* buffer = Buffer_pool_alloc(&context.resource_pools->buffers, &handle);
+ buffer->size = size;
+ buffer->vao = 0;
+
+ // Opengl buffer
+ GLuint gl_buffer_id;
+ glGenBuffers(1, &gl_buffer_id);
+
+ GLenum gl_buf_type;
+ GLenum gl_buf_usage = GL_STATIC_DRAW;
+
+ switch (buf_type) {
+ case BUFFER_UNIFORM:
+ DEBUG("Creating Uniform buffer");
+ gl_buf_type = GL_UNIFORM_BUFFER;
+ /* gl_buf_usage = GL_DYNAMIC_DRAW; */
+ buffer->id.ubo = gl_buffer_id;
+ break;
+ case BUFFER_DEFAULT:
+ case BUFFER_VERTEX:
+ DEBUG("Creating Vertex buffer");
+ gl_buf_type = GL_ARRAY_BUFFER;
+ buffer->id.vbo = gl_buffer_id;
+ break;
+ case BUFFER_INDEX:
+ DEBUG("Creating Index buffer");
+ gl_buf_type = GL_ELEMENT_ARRAY_BUFFER;
+ buffer->id.ibo = gl_buffer_id;
+ break;
+    default:
+      WARN("Unimplemented gpu_buffer_type provided %s", buffer_type_names[buf_type]);
+      gl_buf_type = GL_ARRAY_BUFFER;  // fall back so we never bind an uninitialised enum
+      buffer->id.vbo = gl_buffer_id;
+      break;
+ }
+ // bind buffer
+ glBindBuffer(gl_buf_type, gl_buffer_id);
+
+  if (data) {
+    TRACE("Upload data (%llu bytes) as part of buffer creation", (unsigned long long)size);
+    glBufferData(gl_buf_type, buffer->size, data, gl_buf_usage);
+  } else {
+    TRACE("Allocating but not uploading (%llu bytes)", (unsigned long long)size);
+    glBufferData(gl_buf_type, buffer->size, NULL, gl_buf_usage);
+  }
+
+ glBindBuffer(gl_buf_type, 0);
+
+ return handle;
+}
+
+void GPU_BufferDestroy(BufferHandle handle) {
+  // handle.raw is a pool handle, not a GL object id - resolve the buffer first
+  GPU_Buffer* buffer = BUFFER_GET(handle);
+  glDeleteBuffers(1, &buffer->id.vbo);  // union members share one GL id
+}
+
+TextureHandle GPU_TextureCreate(TextureDesc desc, bool create_view, const void* data) {
+ // "allocating" the cpu-side struct
+ TextureHandle handle;
+ GPU_Texture* texture = Texture_pool_alloc(&context.resource_pools->textures, &handle);
+ DEBUG("Allocated texture with handle %d", handle.raw);
+
+ GLuint gl_texture_id;
+ glGenTextures(1, &gl_texture_id);
+ texture->id = gl_texture_id;
+
+ GLenum gl_tex_type = opengl_tex_type(desc.tex_type);
+ texture->type = desc.tex_type;
+  DEBUG("Creating texture of type %s", texture_type_names[desc.tex_type]);
+ glBindTexture(gl_tex_type, gl_texture_id);
+
+ GLint internal_format;
+ if (desc.format == TEXTURE_FORMAT_DEPTH_DEFAULT) {
+ internal_format = GL_DEPTH_COMPONENT;
+ } else if (desc.format == TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM) {
+ internal_format = GL_RGBA;
+ } else {
+ internal_format = GL_RGB;
+ }
+
+ GLint format = internal_format;
+ // FIXME: GLint format = desc.format == TEXTURE_FORMAT_DEPTH_DEFAULT ? GL_DEPTH_COMPONENT :
+ // GL_RGBA;
+ GLenum data_type = desc.format == TEXTURE_FORMAT_DEPTH_DEFAULT ? GL_FLOAT : GL_UNSIGNED_BYTE;
+
+ if (desc.format == TEXTURE_FORMAT_DEPTH_DEFAULT) {
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
+ } else {
+ // set the texture wrapping parameters
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
+ GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ // set texture filtering parameters
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ }
+
+ if (data) {
+ glTexImage2D(GL_TEXTURE_2D, 0, internal_format, desc.extents.x, desc.extents.y, 0, format,
+ data_type, data);
+ if (desc.tex_type == TEXTURE_TYPE_2D) {
+ glGenerateMipmap(GL_TEXTURE_2D);
+ }
+ } else {
+ WARN("No image data provided");
+ glTexImage2D(GL_TEXTURE_2D, 0, internal_format, desc.extents.x, desc.extents.y, 0, format,
+ data_type, NULL);
+ }
+
+ glBindTexture(GL_TEXTURE_2D, 0);
+
+ return handle;
+}
+
+GPU_Texture* GPU_TextureAlloc(TextureHandle* out_handle) {
+ TextureHandle handle;
+ GPU_Texture* texture = Texture_pool_alloc(&context.resource_pools->textures, &handle);
+ DEBUG("Allocated texture with handle %d", handle.raw);
+
+ GLuint gl_texture_id;
+ glGenTextures(1, &gl_texture_id);
+ texture->id = gl_texture_id;
+
+ if (out_handle != NULL) {
+ *out_handle = handle;
+ }
+
+ return texture;
+}
+
+void GPU_TextureDestroy(TextureHandle handle) {
+  // As with buffers, resolve the pool handle to the GL texture id first
+  GPU_Texture* tex = TEXTURE_GET(handle);
+  glDeleteTextures(1, &tex->id);
+}
+
+// TODO: void GPU_TextureUpload(TextureHandle handle, size_t n_bytes, const void* data)
+
+void GPU_EncodeBindPipeline(GPU_CmdEncoder* encoder, GPU_Pipeline* pipeline) {
+ encoder->pipeline = pipeline;
+
+ // In OpenGL binding a pipeline is more or less equivalent to just setting the shader
+ glUseProgram(pipeline->shader_id);
+
+ if (pipeline->wireframe) {
+ glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
+ } else {
+ glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
+ }
+}
+
+void GPU_EncodeBindShaderData(GPU_CmdEncoder* encoder, u32 group, ShaderDataLayout layout) {
+ for (u32 binding_i = 0; binding_i < layout.binding_count; binding_i++) {
+ ShaderBinding binding = layout.bindings[binding_i];
+
+ switch (binding.kind) {
+ case BINDING_BYTES: {
+#ifdef RAL_ASSERTS
+ CASSERT_MSG(binding.data.bytes.data, "void* data pointer should be non null");
+ CASSERT_MSG(binding.data.bytes.size > 0, "size should be greater than 0 bytes");
+#endif
+ BufferHandle b;
+ GPU_Buffer* ubo_buf;
+ bool found = false;
+ for (u32 i = 0; i < encoder->pipeline->uniform_count; i++) {
+ b = encoder->pipeline->uniform_bindings[i];
+ ubo_buf = BUFFER_GET(b);
+ assert(ubo_buf->name != NULL);
+ if (strcmp(ubo_buf->name, binding.label) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+          ERROR("Couldn't find uniform buffer object for %s!", binding.label);
+ break;
+ }
+
+ i32 blockIndex = glGetUniformBlockIndex(encoder->pipeline->shader_id, binding.label);
+ if (blockIndex < 0) {
+ WARN("Couldn't retrieve block index for uniform block '%s'", binding.label);
+ break;
+ }
+
+ glBindBuffer(GL_UNIFORM_BUFFER, ubo_buf->id.ubo);
+ glBufferSubData(GL_UNIFORM_BUFFER, 0, ubo_buf->size, binding.data.bytes.data);
+ break;
+ }
+ case BINDING_TEXTURE: {
+ GPU_Texture* tex = TEXTURE_GET(binding.data.texture.handle);
+ GLint tex_slot = glGetUniformLocation(encoder->pipeline->shader_id, binding.label);
+      if (tex_slot < 0) {  // glGetUniformLocation returns -1 on failure, not GL_INVALID_VALUE
+ WARN("Invalid binding label for texture %s - couldn't fetch texture slot uniform",
+ binding.label);
+ }
+ glUniform1i(tex_slot, binding_i);
+ glActiveTexture(GL_TEXTURE0 + binding_i);
+ glBindTexture(opengl_tex_type(tex->type), tex->id);
+ break;
+ }
+ default:
+ WARN("Unsupported binding kind");
+ }
+ }
+}
+
+void GPU_EncodeSetDefaults(GPU_CmdEncoder* encoder) {}
+
+void GPU_EncodeSetVertexBuffer(GPU_CmdEncoder* encoder, BufferHandle buf) {
+ GPU_Buffer* buffer = BUFFER_GET(buf);
+ if (buffer->vao == 0) { // if no VAO for this vertex buffer, create it
+ INFO("Setting up VAO");
+ buffer->vao = opengl_bindcreate_vao(buffer, encoder->pipeline->vertex_desc);
+ }
+ glBindVertexArray(buffer->vao);
+}
+void GPU_EncodeSetIndexBuffer(GPU_CmdEncoder* encoder, BufferHandle buf) {
+ GPU_Buffer* buffer = BUFFER_GET(buf);
+ glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer->id.ibo);
+}
+void GPU_EncodeDrawTris(GPU_CmdEncoder* encoder, u64 count) {
+ glDrawArrays(GL_TRIANGLES, 0, count);
+}
+void GPU_EncodeDrawIndexedTris(GPU_CmdEncoder* encoder, u64 index_count) {
+ glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, 0);
+}
+
+PUB void GPU_EncodeDraw(GPU_CmdEncoder* encoder, PrimitiveTopology topology, u64 count) {
+ glDrawArrays(opengl_prim_topology(topology), 0, count);
+}
+PUB void GPU_EncodeDrawIndexed(GPU_CmdEncoder* encoder, PrimitiveTopology topology,
+ u64 index_count) {
+ glDrawElements(opengl_prim_topology(topology), index_count, GL_UNSIGNED_INT, 0);
+}
+
+PUB void GPU_WriteTextureRegion(GPU_CmdEncoder* encoder, TextureHandle dst, u32 x_offset,
+ u32 y_offset, u32 width, u32 height, const void* data) {
+ CASSERT_MSG(data, "const void* data must not be NULL");
+
+ GPU_Texture* tex = TEXTURE_GET(dst);
+
+ glBindTexture(GL_TEXTURE_2D, tex->id);
+ glTexSubImage2D(GL_TEXTURE_2D, 0, x_offset, y_offset, width, height, GL_RGBA, GL_UNSIGNED_BYTE,
+ data);
+}
+
+bool GPU_Backend_BeginFrame() {
+  // NOTE: the *2 assumes a 2x (retina/hidpi) framebuffer scale relative to window size
+  glViewport(0, 0, context.swapchain.dimensions.x * 2, context.swapchain.dimensions.y * 2);
+ glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ return true;
+}
+
+void GPU_Backend_EndFrame() { glfwSwapBuffers(context.window); }
+
+#endif
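A note on the uniform flow above: `GPU_GraphicsPipeline_Create` allocates one UBO per `BINDING_BYTES` binding and records its label, and `GPU_EncodeBindShaderData` later finds that UBO again by `strcmp` on the label, so the label must match the GLSL uniform block name. A caller-side sketch, assuming the `ShaderDataLayout`/`ShaderBinding` fields used in the backend (`bindings`, `binding_count`, `label`, `kind`, `data.bytes`); `MVPData` is a hypothetical uniform struct:

```c
typedef struct MVPData { Mat4 mvp; } MVPData;

ShaderDataLayout mvp_layout(MVPData* data) {
  ShaderBinding b = { .label = "MVPData",  // must equal the GLSL block name
                      .kind = BINDING_BYTES,
                      .data.bytes = { .size = sizeof(MVPData), .data = data } };
  return (ShaderDataLayout){ .bindings = { b }, .binding_count = 1 };
}

// Per frame: GPU_EncodeBindShaderData(enc, 0, mvp_layout(&mvp));
```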
diff --git a/archive/src/ral/backends/opengl/backend_opengl.h b/archive/src/ral/backends/opengl/backend_opengl.h
new file mode 100644
index 0000000..7bd1b81
--- /dev/null
+++ b/archive/src/ral/backends/opengl/backend_opengl.h
@@ -0,0 +1,109 @@
+#pragma once
+#include "defines.h"
+
+#if defined(CEL_REND_BACKEND_OPENGL)
+
+#include "maths_types.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+
+#define MAX_PIPELINE_UNIFORM_BUFFERS 32
+
+#define OPENGL_DEFAULT_FRAMEBUFFER 0
+
+typedef struct GPU_Swapchain {
+ u32x2 dimensions;
+} GPU_Swapchain;
+
+typedef struct GPU_Device {
+ u32 pad;
+} GPU_Device;
+
+typedef struct GPU_PipelineLayout {
+ void* pad;
+} GPU_PipelineLayout;
+
+typedef struct GPU_Pipeline {
+ u32 shader_id;
+ GPU_Renderpass* renderpass;
+ VertexDescription vertex_desc;
+ BufferHandle uniform_bindings[MAX_PIPELINE_UNIFORM_BUFFERS];
+ u32 uniform_count;
+ bool wireframe;
+} GPU_Pipeline;
+
+typedef struct GPU_Renderpass {
+ u32 fbo;
+ GPU_RenderpassDesc description;
+} GPU_Renderpass;
+
+typedef struct GPU_CmdEncoder {
+ GPU_Pipeline* pipeline;
+} GPU_CmdEncoder; // Recording
+
+typedef struct GPU_CmdBuffer {
+ void* pad;
+} GPU_CmdBuffer; // Ready for submission
+
+typedef struct GPU_Buffer {
+ union {
+ u32 vbo;
+ u32 ibo;
+ u32 ubo;
+ } id;
+ union {
+ u32 vao;
+ u32 ubo_binding_point;
+ }; // Optional
+ char* name;
+ u64 size;
+} GPU_Buffer;
+
+typedef struct GPU_Texture {
+ u32 id;
+ GPU_TextureType type;
+} GPU_Texture;
+
+typedef struct opengl_support {
+ u32 pad;
+} opengl_support;
+
+void uniform_vec3f(u32 program_id, const char* uniform_name, Vec3* value);
+void uniform_f32(u32 program_id, const char* uniform_name, f32 value);
+void uniform_i32(u32 program_id, const char* uniform_name, i32 value);
+void uniform_mat4f(u32 program_id, const char* uniform_name, Mat4* value);
+
+typedef enum GlCommandType {
+ GLCMD_DRAW,
+ GLCMD_DRAW_INDEXED,
+ GLCMD_BIND_VBUF,
+ GLCMD_BIND_IBUF,
+ GLCMD_SET_PROGRAM,
+} GlCommandType;
+
+typedef struct GlCommand {
+ GlCommandType cmd_type;
+ union {
+ struct {
+ PrimitiveTopology topology;
+ u32 start_vertex;
+ u32 vertex_count;
+ // TODO: instance
+ } draw;
+ struct {
+ PrimitiveTopology topology;
+ u32 index_count;
+ } draw_indexed;
+ struct {
+ u32 buffer_id;
+ } bind_vbuf;
+ struct {
+ u32 buffer_id;
+ } bind_ibuf;
+ struct {
+ u32 program_id;
+ } set_program;
+ } data;
+} GlCommand;
+
+#endif
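`GlCommand` is declared here but never consumed by the backend above; a replay loop over the tagged union would presumably look something like this sketch (`gl_replay` is hypothetical):

```c
static void gl_replay(const GlCommand* cmds, size_t count) {
  for (size_t i = 0; i < count; i++) {
    const GlCommand* c = &cmds[i];
    switch (c->cmd_type) {
      case GLCMD_SET_PROGRAM: glUseProgram(c->data.set_program.program_id); break;
      case GLCMD_BIND_VBUF:   glBindBuffer(GL_ARRAY_BUFFER, c->data.bind_vbuf.buffer_id); break;
      case GLCMD_BIND_IBUF:   glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, c->data.bind_ibuf.buffer_id); break;
      case GLCMD_DRAW:
        glDrawArrays(opengl_prim_topology(c->data.draw.topology), c->data.draw.start_vertex,
                     c->data.draw.vertex_count);
        break;
      case GLCMD_DRAW_INDEXED:
        glDrawElements(opengl_prim_topology(c->data.draw_indexed.topology),
                       c->data.draw_indexed.index_count, GL_UNSIGNED_INT, 0);
        break;
    }
  }
}
```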
diff --git a/archive/src/ral/backends/opengl/opengl_helpers.h b/archive/src/ral/backends/opengl/opengl_helpers.h
new file mode 100644
index 0000000..706e2a0
--- /dev/null
+++ b/archive/src/ral/backends/opengl/opengl_helpers.h
@@ -0,0 +1,159 @@
+#pragma once
+#include "defines.h"
+#include "ral_common.h"
+#include "ral_impl.h"
+#if defined(CEL_REND_BACKEND_OPENGL)
+#include "backend_opengl.h"
+#include "file.h"
+#include "log.h"
+#include "ral_types.h"
+
+#include <glad/glad.h>
+#include <glfw3.h>
+#include "ral_types.h"
+
+typedef struct opengl_vertex_attr {
+ u32 count;
+ GLenum data_type;
+} opengl_vertex_attr;
+
+static opengl_vertex_attr format_from_vertex_attr(VertexAttribType attr) {
+  switch (attr) {
+    case ATTR_F32:
+      return (opengl_vertex_attr){ .count = 1, .data_type = GL_FLOAT };
+    case ATTR_U32:
+      return (opengl_vertex_attr){ .count = 1, .data_type = GL_UNSIGNED_INT };
+    case ATTR_I32:
+      return (opengl_vertex_attr){ .count = 1, .data_type = GL_INT };
+    case ATTR_F32x2:
+      return (opengl_vertex_attr){ .count = 2, .data_type = GL_FLOAT };
+    case ATTR_U32x2:
+      return (opengl_vertex_attr){ .count = 2, .data_type = GL_UNSIGNED_INT };
+    case ATTR_I32x2:
+      return (opengl_vertex_attr){ .count = 2, .data_type = GL_INT };
+    case ATTR_F32x3:
+      return (opengl_vertex_attr){ .count = 3, .data_type = GL_FLOAT };
+    case ATTR_U32x3:
+      return (opengl_vertex_attr){ .count = 3, .data_type = GL_UNSIGNED_INT };
+    case ATTR_I32x3:
+      return (opengl_vertex_attr){ .count = 3, .data_type = GL_INT };
+    case ATTR_F32x4:
+      return (opengl_vertex_attr){ .count = 4, .data_type = GL_FLOAT };
+    case ATTR_U32x4:
+      return (opengl_vertex_attr){ .count = 4, .data_type = GL_UNSIGNED_INT };
+    case ATTR_I32x4:
+      return (opengl_vertex_attr){ .count = 4, .data_type = GL_INT };
+  }
+  return (opengl_vertex_attr){ .count = 1, .data_type = GL_FLOAT };  // unreachable
+}
+
+static u32 opengl_bindcreate_vao(GPU_Buffer* buf, VertexDescription desc) {
+ DEBUG("Vertex format name %s", desc.debug_label);
+ // 1. Bind the buffer
+ glBindBuffer(GL_ARRAY_BUFFER, buf->id.vbo);
+ // 2. Create new VAO
+ u32 vao;
+ glGenVertexArrays(1, &vao);
+ glBindVertexArray(vao);
+
+ // Attributes
+ u32 attr_count = desc.attributes_count;
+ // printf("N attributes %d\n", attr_count);
+ u64 offset = 0;
+ size_t vertex_size = desc.use_full_vertex_size ? sizeof(Vertex) : VertexDesc_CalcStride(&desc);
+ for (u32 i = 0; i < desc.attributes_count; i++) {
+ opengl_vertex_attr format = format_from_vertex_attr(desc.attributes[i]);
+ glVertexAttribPointer(i, format.count, format.data_type, GL_FALSE, vertex_size, (void*)offset);
+    TRACE("  attr %u: count=%u type=%u stride=%zu offset=%llu name=%s", i, format.count,
+          format.data_type, vertex_size, (unsigned long long)offset, desc.attr_names[i]);
+ glEnableVertexAttribArray(i); // nth index
+ size_t this_offset = VertexAttribSize(desc.attributes[i]);
+ // printf("offset total %lld this attr %zu\n", offset, this_offset);
+ offset += this_offset;
+ }
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+
+ return vao;
+}
+
+static u32 shader_create_separate(const char* vert_shader, const char* frag_shader) {
+ INFO("Load shaders at %s and %s", vert_shader, frag_shader);
+ int success;
+ char info_log[512];
+
+ u32 vertex = glCreateShader(GL_VERTEX_SHADER);
+ const char* vertex_shader_src = string_from_file(vert_shader);
+ if (vertex_shader_src == NULL) {
+    ERROR("EXIT: couldn't load vertex shader source");
+ exit(-1);
+ }
+ glShaderSource(vertex, 1, &vertex_shader_src, NULL);
+ glCompileShader(vertex);
+ glGetShaderiv(vertex, GL_COMPILE_STATUS, &success);
+ if (!success) {
+ glGetShaderInfoLog(vertex, 512, NULL, info_log);
+ printf("%s\n", info_log);
+ ERROR("EXIT: vertex shader compilation failed");
+ exit(-1);
+ }
+
+ // fragment shader
+ u32 fragment = glCreateShader(GL_FRAGMENT_SHADER);
+ const char* fragment_shader_src = string_from_file(frag_shader);
+ if (fragment_shader_src == NULL) {
+    ERROR("EXIT: couldn't load fragment shader source");
+ exit(-1);
+ }
+ glShaderSource(fragment, 1, &fragment_shader_src, NULL);
+ glCompileShader(fragment);
+ glGetShaderiv(fragment, GL_COMPILE_STATUS, &success);
+ if (!success) {
+ glGetShaderInfoLog(fragment, 512, NULL, info_log);
+ printf("%s\n", info_log);
+ ERROR("EXIT: fragment shader compilation failed");
+ exit(-1);
+ }
+
+ u32 shader_prog;
+ shader_prog = glCreateProgram();
+
+ glAttachShader(shader_prog, vertex);
+ glAttachShader(shader_prog, fragment);
+  glLinkProgram(shader_prog);
+  glGetProgramiv(shader_prog, GL_LINK_STATUS, &success);
+  if (!success) {
+    glGetProgramInfoLog(shader_prog, 512, NULL, info_log);
+    printf("%s\n", info_log);
+    ERROR("EXIT: shader program linking failed");
+    exit(-1);
+  }
+  glDeleteShader(vertex);
+  glDeleteShader(fragment);
+  free((char*)vertex_shader_src);
+  free((char*)fragment_shader_src);
+
+ return shader_prog;
+}
+
+static GLenum opengl_tex_type(GPU_TextureType tex_type) {
+ switch (tex_type) {
+ case TEXTURE_TYPE_2D:
+ return GL_TEXTURE_2D;
+ case TEXTURE_TYPE_CUBE_MAP:
+ return GL_TEXTURE_CUBE_MAP;
+ default:
+ return GL_TEXTURE_2D;
+ }
+}
+
+static GLenum opengl_prim_topology(PrimitiveTopology t) {
+  switch (t) {
+    case CEL_POINT:
+      return GL_POINTS;  // GL_POINT is a polygon mode; GL_POINTS is the draw mode
+    case CEL_LINE:
+      return GL_LINES;
+    case CEL_LINE_STRIP:
+      return GL_LINE_STRIP;
+    case CEL_TRI:
+      return GL_TRIANGLES;
+    case CEL_TRI_STRIP:
+      return GL_TRIANGLE_STRIP;
+    case PRIMITIVE_TOPOLOGY_COUNT:
+      WARN("Invalid PrimitiveTopology value");
+      break;
+  }
+  return GL_TRIANGLES;
+}
+
+#endif
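To make the stride/offset arithmetic in `opengl_bindcreate_vao` concrete, here is the standard static 3D format worked through (the byte counts follow from `VertexAttribSize` in `ral_common.c`):

```c
// inPosition  ATTR_F32x3 -> 12 bytes, offset  0
// inNormal    ATTR_F32x3 -> 12 bytes, offset 12
// inTexCoords ATTR_F32x2 ->  8 bytes, offset 24
// packed stride = 12 + 12 + 8 = 32 bytes
VertexDescription desc = static_3d_vertex_description();
size_t stride = VertexDesc_CalcStride(&desc);  // 32
// With use_full_vertex_size set (as static_3d_vertex_description does), the
// VAO uses sizeof(Vertex) as the stride instead of the packed 32 bytes.
```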
diff --git a/archive/src/ral/backends/vulkan/backend_vulkan.c b/archive/src/ral/backends/vulkan/backend_vulkan.c
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/archive/src/ral/backends/vulkan/backend_vulkan.c
diff --git a/archive/src/ral/backends/vulkan/backend_vulkan.h b/archive/src/ral/backends/vulkan/backend_vulkan.h
new file mode 100644
index 0000000..f31bed2
--- /dev/null
+++ b/archive/src/ral/backends/vulkan/backend_vulkan.h
@@ -0,0 +1,44 @@
+#pragma once
+
+#ifdef CEL_REND_BACKEND_VULKAN
+#include "defines.h"
+#include "maths_types.h"
+#include "ral.h"
+#include "ral_impl.h"
+#include "ral_types.h"
+
+#include <vulkan/vk_platform.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+// Provide definitions for RAL structs
+
+struct GPU_Swapchain {
+ VkSwapchainKHR handle;
+};
+
+struct GPU_Device {
+ VkPhysicalDevice physical_device;
+ VkDevice logical_device;
+};
+
+struct GPU_PipelineLayout {};
+struct GPU_Pipeline {};
+struct GPU_Renderpass {};
+struct GPU_CmdEncoder {};
+struct GPU_CmdBuffer {};
+struct GPU_Buffer {
+ VkBuffer handle;
+ VkDeviceMemory memory;
+ u64 size;
+};
+struct GPU_Texture {
+ VkImage handle;
+ VkDeviceMemory memory;
+ u64 size;
+ VkImageView view;
+ VkSampler sampler;
+ char* debug_label;
+};
+
+#endif
diff --git a/archive/src/ral/backends/vulkan/vulkan_glossary.md b/archive/src/ral/backends/vulkan/vulkan_glossary.md
new file mode 100644
index 0000000..4214f9d
--- /dev/null
+++ b/archive/src/ral/backends/vulkan/vulkan_glossary.md
@@ -0,0 +1,18 @@
+# Vulkan Glossary
+
+*from https://vkguide.dev/docs/introduction/vulkan_execution/*
+
+- **VkInstance**: The Vulkan context, used to access drivers.
+- **VkPhysicalDevice**: A GPU. Used to query physical GPU details, like features, capabilities, memory size, etc.
+- **VkDevice**: The “logical” GPU context that you actually execute things on.
+- **VkBuffer**: A chunk of GPU visible memory.
+- **VkImage**: A texture you can write to and read from.
+- **VkPipeline**: Holds the state of the gpu needed to draw. For example: shaders, rasterization options, depth settings.
+- **VkRenderPass**: Holds information about the images you are rendering into. All drawing commands have to be done inside a renderpass. Only used in legacy vkguide.
+- **VkFrameBuffer**: Holds the target images for a renderpass. Only used in legacy vkguide.
+- **VkCommandBuffer**: Encodes GPU commands. All execution that is performed on the GPU itself (not in the driver) has to be encoded in a VkCommandBuffer.
+- **VkQueue**: Execution “port” for commands. GPUs will have a set of queues with different properties. Some allow only graphics commands, others only allow memory commands, etc. Command buffers are executed by submitting them into a queue, which will copy the rendering commands onto the GPU for execution.
+- **VkDescriptorSet**: Holds the binding information that connects shader inputs to data such as VkBuffer resources and VkImage textures. Think of it as a set of gpu-side pointers that you bind once.
+- **VkSwapchainKHR**: Holds the images for the screen. It allows you to render things into a visible window. The KHR suffix shows that it comes from an extension, which in this case is VK_KHR_swapchain.
+- **VkSemaphore**: Synchronizes GPU to GPU execution of commands. Used for syncing multiple command buffer submissions one after another.
+- **VkFence**: Synchronizes GPU to CPU execution of commands. Used to know if a command buffer has finished being executed on the GPU.
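A minimal sketch tying several glossary entries together: work is encoded into a VkCommandBuffer, handed to a VkQueue, and a VkFence provides the GPU-to-CPU sync. `device`, `queue`, `cmd_buf`, and `fence` are assumed to have been created elsewhere; `VK_CHECK` is the macro from `vulkan_helpers.h` below:

```c
VkSubmitInfo submit = {
  .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
  .commandBufferCount = 1,
  .pCommandBuffers = &cmd_buf,
};
VK_CHECK(vkQueueSubmit(queue, 1, &submit, fence));
// Block until the GPU signals the fence (GPU -> CPU synchronisation).
VK_CHECK(vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX));
```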
diff --git a/archive/src/ral/backends/vulkan/vulkan_helpers.h b/archive/src/ral/backends/vulkan/vulkan_helpers.h
new file mode 100644
index 0000000..23666c6
--- /dev/null
+++ b/archive/src/ral/backends/vulkan/vulkan_helpers.h
@@ -0,0 +1,199 @@
+#pragma once
+
+#include <assert.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+#include "darray.h"
+#include "defines.h"
+#include "log.h"
+#include "str.h"
+
+#define VULKAN_PHYS_DEVICE_MAX_EXTENSION_NAMES 36
+
+DECL_TYPED_ARRAY(const char*, cstr)
+
+static void plat_get_required_extension_names(cstr_darray* extensions) {
+#ifdef CEL_PLATFORM_LINUX
+ cstr_darray_push(extensions, "VK_KHR_xcb_surface");
+#endif
+}
+
+// TODO(omni): port to using internal assert functions
+#define VK_CHECK(vulkan_expr) \
+ do { \
+ VkResult res = vulkan_expr; \
+ if (res != VK_SUCCESS) { \
+ ERROR_EXIT("Vulkan error: %u (%s:%d)", res, __FILE__, __LINE__); \
+ } \
+ } while (0)
+
+// TODO: typedef struct vk_debugger {} vk_debugger;
+
+typedef struct vulkan_physical_device_requirements {
+ bool graphics;
+ bool present;
+ bool compute;
+ bool transfer;
+ str8 device_ext_names[VULKAN_PHYS_DEVICE_MAX_EXTENSION_NAMES];
+ size_t device_ext_name_count;
+ bool sampler_anistropy;
+ bool discrete_gpu;
+} vulkan_physical_device_requirements;
+
+#define VULKAN_MAX_DEFAULT 32
+
+typedef struct vulkan_swapchain_support_info {
+ VkSurfaceCapabilitiesKHR capabilities;
+ VkSurfaceFormatKHR formats[VULKAN_MAX_DEFAULT];
+ u32 format_count;
+ VkPresentModeKHR present_modes[VULKAN_MAX_DEFAULT];
+ u32 mode_count;
+} vulkan_swapchain_support_info;
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vk_debug_callback(
+ VkDebugUtilsMessageSeverityFlagBitsEXT severity, VkDebugUtilsMessageTypeFlagsEXT flags,
+ const VkDebugUtilsMessengerCallbackDataEXT* callback_data, void* user_data);
+
+static void vulkan_device_query_swapchain_support(VkPhysicalDevice device, VkSurfaceKHR surface,
+ vulkan_swapchain_support_info* out_support_info) {
+ // TODO: add VK_CHECK to these calls!
+
+ // Surface capabilities
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface, &out_support_info->capabilities);
+
+ // Surface formats
+ vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &out_support_info->format_count,
+ 0); // Get number of formats
+ if (out_support_info->format_count > 0) {
+ vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &out_support_info->format_count,
+ out_support_info->formats);
+ }
+
+ // Present Modes
+  vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &out_support_info->mode_count,
+                                            0);  // Get number of present modes
+ if (out_support_info->mode_count > 0) {
+ vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &out_support_info->mode_count,
+ out_support_info->present_modes);
+ }
+}
+
+static VkSurfaceFormatKHR choose_swapchain_format(
+ vulkan_swapchain_support_info* swapchain_support) {
+ assert(swapchain_support->format_count > 0);
+ // find a format
+ for (u32 i = 0; i < swapchain_support->format_count; i++) {
+ VkSurfaceFormatKHR format = swapchain_support->formats[i];
+ if (format.format == VK_FORMAT_B8G8R8A8_SRGB &&
+ format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
+ return format;
+ }
+ }
+ return swapchain_support->formats[0];
+}
+
+// static bool physical_device_meets_requirements(
+// VkPhysicalDevice device, VkSurfaceKHR surface, const VkPhysicalDeviceProperties* properties,
+// const VkPhysicalDeviceFeatures* features,
+// const vulkan_physical_device_requirements* requirements,
+// vulkan_physical_device_queue_family_info* out_queue_info,
+// vulkan_swapchain_support_info* out_swapchain_support) {
+// // TODO: pass in an arena
+
+// out_queue_info->graphics_family_index = -1;
+// out_queue_info->present_family_index = -1;
+// out_queue_info->compute_family_index = -1;
+// out_queue_info->transfer_family_index = -1;
+
+// if (requirements->discrete_gpu) {
+// if (properties->deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) {
+// TRACE("Device is not a physical GPU. Skipping.");
+// return false;
+// }
+// }
+
+// u32 queue_family_count = 0;
+// vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, 0);
+// VkQueueFamilyProperties queue_families[queue_family_count];
+// vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, queue_families);
+
+// INFO("Graphics | Present | Compute | Transfer | Name");
+// u8 min_transfer_score = 255;
+// for (u32 i = 0; i < queue_family_count; i++) {
+// u8 current_transfer_score = 0;
+
+// // Graphics queue
+// if (queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+// out_queue_info->graphics_family_index = i;
+// current_transfer_score++;
+// }
+
+// // Compute queue
+// if (queue_families[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
+// out_queue_info->compute_family_index = i;
+// current_transfer_score++;
+// }
+
+// // Transfer queue
+// if (queue_families[i].queueFlags & VK_QUEUE_TRANSFER_BIT) {
+// // always take the lowest score transfer index
+// if (current_transfer_score <= min_transfer_score) {
+// min_transfer_score = current_transfer_score;
+// out_queue_info->transfer_family_index = i;
+// }
+// }
+
+// // Present Queue
+// VkBool32 supports_present = VK_FALSE;
+// vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &supports_present);
+// if (supports_present) {
+// out_queue_info->present_family_index = i;
+// }
+// }
+
+// INFO(" %d | %d | %d | %d | %s",
+// out_queue_info->graphics_family_index != -1, out_queue_info->present_family_index != -1,
+// out_queue_info->compute_family_index != -1, out_queue_info->transfer_family_index != -1,
+// properties->deviceName);
+// TRACE("Graphics Family queue index: %d", out_queue_info->graphics_family_index);
+// TRACE("Present Family queue index: %d", out_queue_info->present_family_index);
+// TRACE("Compute Family queue index: %d", out_queue_info->compute_family_index);
+// TRACE("Transfer Family queue index: %d", out_queue_info->transfer_family_index);
+
+// if ((!requirements->graphics ||
+// (requirements->graphics && out_queue_info->graphics_family_index != -1))) {
+// INFO("Physical device meets our requirements! Proceed.");
+
+// vulkan_device_query_swapchain_support(
+// device, surface, out_swapchain_support
+
+// // TODO: error handling i.e. format count = 0 or present mode = 0
+
+// );
+// return true;
+// }
+
+// return false;
+// }
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vk_debug_callback(
+ VkDebugUtilsMessageSeverityFlagBitsEXT severity, VkDebugUtilsMessageTypeFlagsEXT flags,
+ const VkDebugUtilsMessengerCallbackDataEXT* callback_data, void* user_data) {
+ switch (severity) {
+ default:
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+ ERROR("%s", callback_data->pMessage);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+ WARN("%s", callback_data->pMessage);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+ INFO("%s", callback_data->pMessage);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
+ TRACE("%s", callback_data->pMessage);
+ break;
+ }
+ return VK_FALSE;
+} \ No newline at end of file
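A sketch of how these helpers compose during swapchain setup; `phys` and `surface` are assumed to exist already, and the zero-count error handling is the TODO noted in the commented-out code above:

```c
vulkan_swapchain_support_info support = { 0 };
vulkan_device_query_swapchain_support(phys, surface, &support);
if (support.format_count == 0 || support.mode_count == 0) {
  ERROR("Surface reports no usable formats or present modes");
}
VkSurfaceFormatKHR fmt = choose_swapchain_format(&support);
// fmt.format / fmt.colorSpace then feed VkSwapchainCreateInfoKHR.
```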
diff --git a/archive/src/ral/ral.h b/archive/src/ral/ral.h
new file mode 100644
index 0000000..fdbadd3
--- /dev/null
+++ b/archive/src/ral/ral.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#include "ral_common.h"
+#include "ral_impl.h"
+#include "ral_types.h" \ No newline at end of file
diff --git a/archive/src/ral/ral_common.c b/archive/src/ral/ral_common.c
new file mode 100644
index 0000000..d921ac4
--- /dev/null
+++ b/archive/src/ral/ral_common.c
@@ -0,0 +1,70 @@
+#include <assert.h>
+
+#include "ral_common.h"
+#include "ral_impl.h"
+
+void BackendPools_Init(arena* a, GPU_BackendPools* backend_pools) {
+ PipelineLayout_pool pipeline_layout_pool =
+ PipelineLayout_pool_create(a, MAX_PIPELINES, sizeof(GPU_PipelineLayout));
+ backend_pools->pipeline_layouts = pipeline_layout_pool;
+ Pipeline_pool pipeline_pool = Pipeline_pool_create(a, MAX_PIPELINES, sizeof(GPU_Pipeline));
+ backend_pools->pipelines = pipeline_pool;
+ Renderpass_pool rpass_pool = Renderpass_pool_create(a, MAX_RENDERPASSES, sizeof(GPU_Renderpass));
+ backend_pools->renderpasses = rpass_pool;
+}
+
+void ResourcePools_Init(arena* a, struct ResourcePools* res_pools) {
+ Buffer_pool buf_pool = Buffer_pool_create(a, MAX_BUFFERS, sizeof(GPU_Buffer));
+ res_pools->buffers = buf_pool;
+ Texture_pool tex_pool = Texture_pool_create(a, MAX_TEXTURES, sizeof(GPU_Texture));
+ res_pools->textures = tex_pool;
+}
+
+VertexDescription static_3d_vertex_description() {
+ VertexDescription builder = { .debug_label = "Standard static 3d vertex format" };
+ VertexDesc_AddAttr(&builder, "inPosition", ATTR_F32x3);
+ VertexDesc_AddAttr(&builder, "inNormal", ATTR_F32x3);
+ VertexDesc_AddAttr(&builder, "inTexCoords", ATTR_F32x2);
+ builder.use_full_vertex_size = true;
+ return builder;
+}
+
+void VertexDesc_AddAttr(VertexDescription* builder, const char* name, VertexAttribType type) {
+  u32 i = builder->attributes_count;
+  assert(i < MAX_VERTEX_ATTRIBUTES);
+
+  builder->attributes[i] = type;
+  builder->attr_names[i] = name;
+
+  builder->attributes_count++;
+}
+
+size_t VertexAttribSize(VertexAttribType attr) {
+ switch (attr) {
+ case ATTR_F32:
+ case ATTR_U32:
+ case ATTR_I32:
+ return 4;
+ case ATTR_F32x2:
+ case ATTR_U32x2:
+ case ATTR_I32x2:
+ return 8;
+ case ATTR_F32x3:
+ case ATTR_U32x3:
+ case ATTR_I32x3:
+ return 12;
+ case ATTR_F32x4:
+ case ATTR_U32x4:
+ case ATTR_I32x4:
+      return 16;
+  }
+  return 0;  // unreachable for a valid VertexAttribType
+}
+
+size_t VertexDesc_CalcStride(VertexDescription* desc) {
+ size_t stride = 0;
+  for (u32 i = 0; i < desc->attributes_count; i++) {
+ size_t size = VertexAttribSize(desc->attributes[i]);
+ stride += size;
+ }
+ return stride;
+}
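The expected wiring at startup, inferred from the signatures here and in `ral_impl.h` (a sketch; `window` comes from GLFW and the pool storage shown is illustrative):

```c
static ResourcePools pools;

void renderer_startup(struct GLFWwindow* window) {
  size_t pool_size = 1024 * 1024;
  arena a = arena_create(malloc(pool_size), pool_size);
  // The caller owns the resource pools; the backend initialises its own
  // GPU_BackendPools internally during GPU_Backend_Init.
  ResourcePools_Init(&a, &pools);
  GPU_Backend_Init("App", window, &pools);
}
```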
diff --git a/archive/src/ral/ral_common.h b/archive/src/ral/ral_common.h
new file mode 100644
index 0000000..5a797e5
--- /dev/null
+++ b/archive/src/ral/ral_common.h
@@ -0,0 +1,61 @@
+/**
+ * @brief Common functions that don't actually depend on the specific backend
+ */
+#pragma once
+#include "buf.h"
+#include "defines.h"
+#include "mem.h"
+#include "ral_types.h"
+// #include "ral_impl.h"
+
+// Concrete implementation
+#if defined(CEL_REND_BACKEND_OPENGL)
+#include "backend_opengl.h"
+#endif
+
+TYPED_POOL(GPU_Buffer, Buffer);
+TYPED_POOL(GPU_Texture, Texture);
+TYPED_POOL(GPU_PipelineLayout, PipelineLayout);
+TYPED_POOL(GPU_Pipeline, Pipeline);
+TYPED_POOL(GPU_Renderpass, Renderpass);
+
+// --- Handy macros
+#define BUFFER_GET(h) (Buffer_pool_get(&context.resource_pools->buffers, h))
+#define TEXTURE_GET(h) (Texture_pool_get(&context.resource_pools->textures, h))
+
+// --- Views
+typedef struct GPU_BufferView {
+ BufferHandle buf;
+ size_t offset;
+ size_t bytes;
+} GPU_BufferView;
+
+// --- Pools
+typedef struct GPU_BackendPools {
+ Pipeline_pool pipelines;
+ PipelineLayout_pool pipeline_layouts;
+ Renderpass_pool renderpasses;
+} GPU_BackendPools;
+void BackendPools_Init(arena* a, GPU_BackendPools* backend_pools);
+
+struct ResourcePools {
+ Buffer_pool buffers;
+ Texture_pool textures;
+};
+typedef struct ResourcePools ResourcePools;
+void ResourcePools_Init(arena* a, struct ResourcePools* res_pools);
+
+PUB GPU_Renderpass* GPU_GetDefaultRenderpass(); // returns a renderpass that draws directly to
+ // default framebuffer with default depth
+
+// --
+// window resize callback
+void GPU_WindowResizedCallback(u32 x, u32 y);
+
+// --- Vertex formats
+VertexDescription static_3d_vertex_description();
+
+void VertexDesc_AddAttr(VertexDescription* builder, const char* name, VertexAttribType type);
+size_t VertexDesc_CalcStride(VertexDescription* desc);
+
+size_t VertexAttribSize(VertexAttribType attr);
diff --git a/archive/src/ral/ral_impl.h b/archive/src/ral/ral_impl.h
new file mode 100644
index 0000000..16c9767
--- /dev/null
+++ b/archive/src/ral/ral_impl.h
@@ -0,0 +1,102 @@
+/**
+ * @brief Entry points that every RAL backend must implement.
+ */
+#pragma once
+#include "buf.h"
+#include "defines.h"
+#include "ral_types.h"
+
+struct GLFWwindow;
+struct ResourcePools;
+
+// Forward declare structs - these must be defined in the backend implementation
+typedef struct GPU_Swapchain GPU_Swapchain;
+typedef struct GPU_Device GPU_Device;
+typedef struct GPU_PipelineLayout GPU_PipelineLayout;
+typedef struct GPU_Pipeline GPU_Pipeline;
+typedef struct GPU_Renderpass GPU_Renderpass;
+typedef struct GPU_CmdEncoder GPU_CmdEncoder; // Recording
+typedef struct GPU_CmdBuffer GPU_CmdBuffer; // Ready for submission
+typedef struct GPU_Buffer GPU_Buffer;
+typedef struct GPU_Texture GPU_Texture;
+
+bool GPU_Backend_Init(const char* window_name, struct GLFWwindow* window,
+ struct ResourcePools* res_pools);
+void GPU_Backend_Shutdown();
+
+bool GPU_Device_Create(GPU_Device* out_device);
+void GPU_Device_Destroy(GPU_Device* device);
+
+bool GPU_Swapchain_Create(GPU_Swapchain* out_swapchain);
+void GPU_Swapchain_Destroy(GPU_Swapchain* swapchain);
+void GPU_Swapchain_Resize(i32 new_width, i32 new_height);
+u32x2 GPU_Swapchain_GetDimensions();
+
+PUB GPU_Renderpass* GPU_Renderpass_Create(GPU_RenderpassDesc description);
+PUB void GPU_Renderpass_Destroy(GPU_Renderpass* pass);
+
+PUB GPU_Pipeline* GPU_GraphicsPipeline_Create(GraphicsPipelineDesc description,
+ GPU_Renderpass* renderpass);
+PUB void GraphicsPipeline_Destroy(GPU_Pipeline* pipeline);
+
+// --- Command buffer
+PUB GPU_CmdEncoder GPU_CmdEncoder_Create();
+PUB void GPU_CmdEncoder_Destroy(GPU_CmdEncoder* encoder);
+PUB void GPU_CmdEncoder_Begin(GPU_CmdEncoder* encoder);
+PUB void GPU_CmdEncoder_Finish(GPU_CmdEncoder* encoder);
+PUB void GPU_CmdEncoder_BeginRender(GPU_CmdEncoder* encoder, GPU_Renderpass* renderpass);
+PUB void GPU_CmdEncoder_EndRender(GPU_CmdEncoder* encoder);
+PUB GPU_CmdEncoder* GPU_GetDefaultEncoder();
+PUB void GPU_QueueSubmit(GPU_CmdBuffer* cmd_buffer);
+
+// --- Buffers
+PUB BufferHandle GPU_BufferCreate(u64 size, GPU_BufferType buf_type, GPU_BufferFlags flags,
+ const void* data);
+PUB void GPU_BufferDestroy(BufferHandle handle);
+PUB void GPU_BufferUpload(BufferHandle buffer, size_t n_bytes, const void* data);
+
+// --- Textures
+PUB TextureHandle GPU_TextureCreate(TextureDesc desc, bool create_view, const void* data);
+PUB GPU_Texture* GPU_TextureAlloc(TextureHandle* out_handle);
+PUB void GPU_TextureDestroy(TextureHandle handle);
+PUB void GPU_TextureUpload(TextureHandle handle, size_t n_bytes, const void* data);
+
+// --- Data copy commands
+PUB void GPU_EncodeCopyBufToBuf(GPU_CmdEncoder* encoder, BufferHandle src, u64 src_offset,
+ BufferHandle dst, u64 dst_offset, u64 copy_size);
+
+// PUB void GPU_EncodeCopyBufToTex(GPU_CmdEncoder* encoder, BufferHandle src, TextureHandle dst,
+// u32 x_offset, u32 y_offset, u32 width, u32 height, const void* data);
+/** @brief Convenience method for writing data directly into a texture. Staging memory is handled
+ * internally. */
+PUB void GPU_WriteTextureRegion(GPU_CmdEncoder* encoder, TextureHandle dst, u32 x_offset,
+ u32 y_offset, u32 width, u32 height, const void* data);
+PUB void GPU_WriteBuffer(GPU_CmdEncoder* encoder, BufferHandle buf, u64 offset, u64 size,
+ const void* data);
+
+// --- Render commands
+PUB void GPU_EncodeBindPipeline(GPU_CmdEncoder* encoder, GPU_Pipeline* pipeline);
+PUB void GPU_EncodeBindShaderData(GPU_CmdEncoder* encoder, u32 group, ShaderDataLayout layout);
+void GPU_EncodeSetDefaults(GPU_CmdEncoder* encoder);
+PUB void GPU_EncodeSetVertexBuffer(GPU_CmdEncoder* encoder, BufferHandle buf);
+PUB void GPU_EncodeSetIndexBuffer(GPU_CmdEncoder* encoder, BufferHandle buf);
+
+PUB void GPU_EncodeDraw(GPU_CmdEncoder* encoder, PrimitiveTopology topology, u64 count);
+PUB void GPU_EncodeDrawIndexed(GPU_CmdEncoder* encoder, PrimitiveTopology topology,
+ u64 index_count);
+// convenience versions of the above
+PUB void GPU_EncodeDrawTris(GPU_CmdEncoder* encoder, u64 count);
+PUB void GPU_EncodeDrawIndexedTris(GPU_CmdEncoder* encoder, u64 index_count);
+PUB void GPU_EncodeDrawInstanced(GPU_CmdEncoder* encoder, u64 index_count,
+ u64 instance_count); // TODO: implement instanced rendering
+
+// --- Frame cycle
+PUB bool GPU_Backend_BeginFrame();
+PUB void GPU_Backend_EndFrame();
+
+// Concrete implementation
+#if defined(CEL_REND_BACKEND_OPENGL)
+#include "backend_opengl.h"
+#elif defined(CEL_REND_BACKEND_VULKAN)
+#include "backend_vulkan.h"
+#endif
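Assembled from the declarations above, one frame through the interface looks roughly like this sketch (`renderpass`, `pipeline`, `vbo`, `ibo`, and `index_count` are assumed to have been created earlier):

```c
if (GPU_Backend_BeginFrame()) {
  GPU_CmdEncoder* enc = GPU_GetDefaultEncoder();
  GPU_CmdEncoder_BeginRender(enc, renderpass);
  GPU_EncodeBindPipeline(enc, pipeline);
  GPU_EncodeSetVertexBuffer(enc, vbo);
  GPU_EncodeSetIndexBuffer(enc, ibo);
  GPU_EncodeDrawIndexedTris(enc, index_count);
  GPU_CmdEncoder_EndRender(enc);
  GPU_Backend_EndFrame();
}
```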
diff --git a/archive/src/ral/ral_types.h b/archive/src/ral/ral_types.h
new file mode 100644
index 0000000..fde3bed
--- /dev/null
+++ b/archive/src/ral/ral_types.h
@@ -0,0 +1,168 @@
+#pragma once
+#include "darray.h"
+#include "defines.h"
+#include "maths_types.h"
+#include "str.h"
+
+// --- Max size constants
+#define MAX_SHADER_DATA_LAYOUTS 8
+#define MAX_SHADER_BINDINGS 8
+#define MAX_BUFFERS 256
+#define MAX_TEXTURES 256
+#define MAX_PIPELINES 128
+#define MAX_RENDERPASSES 128
+#define MAX_VERTEX_ATTRIBUTES 16
+
+// --- Handle types
+CORE_DEFINE_HANDLE(BufferHandle);
+CORE_DEFINE_HANDLE(TextureHandle);
+CORE_DEFINE_HANDLE(SamplerHandle);
+CORE_DEFINE_HANDLE(ShaderHandle);
+CORE_DEFINE_HANDLE(PipelineLayoutHandle);
+CORE_DEFINE_HANDLE(PipelineHandle);
+CORE_DEFINE_HANDLE(RenderpassHandle);
+#define INVALID_TEX_HANDLE ((TextureHandle){ .raw = 9999981 })
+
+// --- Buffers
+typedef enum GPU_BufferType {
+ BUFFER_DEFAULT, // on Vulkan this would be a storage buffer?
+ BUFFER_VERTEX,
+ BUFFER_INDEX,
+ BUFFER_UNIFORM,
+ BUFFER_COUNT
+} GPU_BufferType;
+
+static const char* buffer_type_names[] = {
+ "RAL Buffer Default", "RAL Buffer Vertex", "RAL Buffer Index",
+ "RAL Buffer Uniform", "RAL Buffer Count",
+};
+
+typedef enum GPU_BufferFlag {
+ BUFFER_FLAG_CPU = 1 << 0,
+ BUFFER_FLAG_GPU = 1 << 1,
+ BUFFER_FLAG_STORAGE = 1 << 2,
+ BUFFER_FLAG_COUNT
+} GPU_BufferFlag;
+typedef u32 GPU_BufferFlags;
+
+static const char* texture_type_names[] = {
+ "RAL Texture 2D", "RAL Texture 3D", "RAL Texture 2D Array",
+ "RAL Texture Cubemap", "RAL Texture Count",
+};
+
+typedef enum GPU_TextureFormat {
+ TEXTURE_FORMAT_8_8_8_8_RGBA_UNORM,
+ TEXTURE_FORMAT_8_8_8_RGB_UNORM,
+ TEXTURE_FORMAT_DEPTH_DEFAULT,
+ TEXTURE_FORMAT_COUNT
+} GPU_TextureFormat;
+
+// --- Vertices
+
+typedef enum VertexFormat {
+ VERTEX_STATIC_3D,
+ VERTEX_SPRITE,
+ VERTEX_SKINNED,
+ VERTEX_COLOURED_STATIC_3D,
+ VERTEX_RAW_POS_COLOUR,
+ VERTEX_POS_ONLY,
+ VERTEX_COUNT
+} VertexFormat;
+
+#ifndef TYPED_VERTEX_ARRAY
+KITC_DECL_TYPED_ARRAY(Vertex);
+KITC_DECL_TYPED_ARRAY(u32)
+#define TYPED_VERTEX_ARRAY
+#endif
+
+/// @strip_prefix(ATTR_)
+typedef enum VertexAttribType {
+ ATTR_F32,
+ ATTR_F32x2,
+ ATTR_F32x3,
+ ATTR_F32x4,
+ ATTR_U32,
+ ATTR_U32x2,
+ ATTR_U32x3,
+ ATTR_U32x4,
+ ATTR_I32,
+ ATTR_I32x2,
+ ATTR_I32x3,
+ ATTR_I32x4,
+} VertexAttribType;
+
+typedef struct VertexDescription {
+ const char* debug_label;
+ const char* attr_names[MAX_VERTEX_ATTRIBUTES];
+ VertexAttribType attributes[MAX_VERTEX_ATTRIBUTES];
+ u32 attributes_count;
+ // size_t stride;
+ bool use_full_vertex_size;
+} VertexDescription;
+
+// --- Shaders
+typedef enum PipelineKind {
+ PIPELINE_GRAPHICS,
+ PIPELINE_COMPUTE,
+} PipelineKind;
+
+typedef struct ShaderDesc {
+ const char* debug_name;
+ Str8 filepath; // Where it came from
+ Str8 code; // Either GLSL or SPIRV bytecode
+ bool is_spirv;
+ bool is_combined_vert_frag; // Contains both vertex and fragment stages
+} ShaderDesc;
+
+typedef ShaderDataLayout (*FN_GetBindingLayout)(void* data);
+
+/** @brief takes a `ShaderDataLayout` without data, and puts the correct data into each binding */
+typedef void (*FN_BindShaderData)(ShaderDataLayout* layout, const void* data);
+
+// typedef struct ShaderData {
+// FN_GetBindingLayout get_layout;
+// void* data;
+// } ShaderData;
+
+typedef enum PrimitiveTopology {
+#ifdef TOPOLOGY_SHORT_NAMES
+ CEL_POINT,
+ CEL_LINE,
+ CEL_LINE_STRIP,
+ CEL_TRI,
+ CEL_TRI_STRIP,
+ PRIMITIVE_TOPOLOGY_COUNT
+#else
+ PRIMITIVE_TOPOLOGY_POINT,
+ PRIMITIVE_TOPOLOGY_LINE,
+ PRIMITIVE_TOPOLOGY_LINE_STRIP,
+ PRIMITIVE_TOPOLOGY_TRIANGLE,
+ PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ PRIMITIVE_TOPOLOGY_COUNT
+#endif
+} PrimitiveTopology;
+
+typedef enum Winding { WINDING_CCW, WINDING_CW } Winding;
+
+// based on https://registry.khronos.org/OpenGL-Refpages/gl4/html/glDepthFunc.xhtml
+typedef enum CompareFunc {
+ COMPARE_NEVER,
+ COMPARE_LESS,
+ COMPARE_EQUAL,
+ COMPARE_LESS_EQUAL,
+ COMPARE_GREATER,
+ COMPARE_NOT_EQUAL,
+ COMPARE_GREATER_EQUAL,
+ COMPARE_ALWAYS,
+ COMPARE_COUNT
+} CompareFunc;
+
+bool GraphicsPipelineDesc_AddShaderDataLayout(GraphicsPipelineDesc* desc, ShaderDataLayout layout);
+
+typedef struct GPU_RenderpassDesc {
+ bool default_framebuffer;
+ bool has_color_target;
+ TextureHandle color_target; // for now only support one
+ bool has_depth_stencil;
+ TextureHandle depth_stencil;
+} GPU_RenderpassDesc;
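Two `GPU_RenderpassDesc` configurations are implied by the OpenGL backend: the default-framebuffer pass must carry no attachments (the backend asserts this), while a depth-only pass, e.g. a shadow map, gets `glDrawBuffer(GL_NONE)`. A sketch, where `shadow_depth_tex` is a `TextureHandle` created earlier with `TEXTURE_FORMAT_DEPTH_DEFAULT`:

```c
GPU_RenderpassDesc main_pass = { .default_framebuffer = true };

GPU_RenderpassDesc shadow_pass = {
  .default_framebuffer = false,
  .has_depth_stencil = true,
  .depth_stencil = shadow_depth_tex,
};
```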