author | Joshua Rowe <17525998+omnisci3nce@users.noreply.github.com> | 2024-06-09 14:59:01 +1000
committer | GitHub <noreply@github.com> | 2024-06-09 14:59:01 +1000
commit | 9c79df522980eabdc5e52592cbd152e2a285c4cc (patch)
tree | 9082af194033e5e3e4a770456209d3bac7784943 /src
parent | 8d116bd23d9441e33cb3377e90c08169109b438a (diff)
parent | d4ff15d9cd82a6e3bc71da9d04ee0f250460cef1 (diff)
Merge pull request #16 from omnisci3nce/port-opengl-ral
Bring back OpenGL (part 1)
Diffstat (limited to 'src')
-rw-r--r-- | src/defines.h | 9
-rw-r--r-- | src/maths/maths.h | 1
-rw-r--r-- | src/renderer/archive/old_backend_vulkan.c | 6
-rw-r--r-- | src/renderer/archive/render_backend.h | 2
-rw-r--r-- | src/renderer/archive/render_types.h | 8
-rw-r--r-- | src/renderer/backends/backend_opengl.c | 171
-rw-r--r-- | src/renderer/backends/backend_vulkan.c | 37
-rw-r--r-- | src/renderer/backends/backend_vulkan.h | 5
-rw-r--r-- | src/renderer/backends/metal/backend_metal.h | 74
-rw-r--r-- | src/renderer/backends/metal/backend_metal.m | 285
-rw-r--r-- | src/renderer/backends/opengl/backend_opengl.c | 391
-rw-r--r-- | src/renderer/backends/opengl/backend_opengl.h | 53
-rw-r--r-- | src/renderer/backends/opengl/opengl_helpers.h | 73
-rw-r--r-- | src/renderer/ral.c | 34
-rw-r--r-- | src/renderer/ral.h | 12
-rw-r--r-- | src/renderer/ral_types.h | 11
-rw-r--r-- | src/renderer/render.c | 1
-rw-r--r-- | src/renderer/render_types.h | 7
-rw-r--r-- | src/resources/gltf.c | 5
-rw-r--r-- | src/resources/obj.c | 4
-rw-r--r-- | src/std/mem.c | 9
-rw-r--r-- | src/std/mem.h | 5
22 files changed, 988 insertions, 215 deletions
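For orientation before the per-file diffs: the core of this merge is that backend selection is now a compile-time switch driven by the `CEL_REND_BACKEND_*` defines (set by the build system, per the note added to `src/defines.h`), with `src/renderer/ral.c` and `src/renderer/render_types.h` including the matching backend header. The sketch below mirrors the preprocessor chain added in this diff; the exact build flags (e.g. an xmake-supplied `-DCEL_REND_BACKEND_OPENGL`) are an assumption here, not something this diff shows directly.

```c
/* Sketch of the per-backend include dispatch added in ral.c / render_types.h.
 * CEL_REND_BACKEND_* is assumed to be defined by the build system (xmake.lua),
 * e.g. OpenGL on Linux and Metal on macOS after this merge. */
#if defined(CEL_REND_BACKEND_VULKAN)
#include "backend_vulkan.h"
#elif defined(CEL_REND_BACKEND_METAL)
#include "backend_metal.h"
#elif defined(CEL_REND_BACKEND_OPENGL)
#include "backend_opengl.h"
#endif
```

The RAL entry points (`gpu_backend_init`, `gpu_device_create`, `gpu_graphics_pipeline_create`, ...) then resolve to whichever backend translation unit was compiled in, while the shared pool setup (`backend_pools_init`, `resource_pools_init`) moves into `ral.c` so every backend reuses it.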
diff --git a/src/defines.h b/src/defines.h
index 4b6f8c7..9050f25 100644
--- a/src/defines.h
+++ b/src/defines.h
@@ -64,10 +64,11 @@ Renderer backend defines:
 #define CEL_REND_BACKEND_METAL 1
 */
 
+// NOTE: The below is now handled in xmake.lua
 // Platform will inform renderer backend (unless user overrides)
 #if defined(CEL_PLATFORM_LINUX)
-// #define CEL_REND_BACKEND_OPENGL 1
-#define CEL_REND_BACKEND_VULKAN 1
+#define CEL_REND_BACKEND_OPENGL 1
+// #define CEL_REND_BACKEND_VULKAN 1
 #endif
 
 #if defined(CEL_PLATFORM_WINDOWS)
@@ -76,6 +77,6 @@ Renderer backend defines:
 #endif
 
 #if defined(CEL_PLATFORM_MAC)
-// #define CEL_REND_BACKEND_METAL 1
-#define CEL_REND_BACKEND_OPENGL 1
+#define CEL_REND_BACKEND_METAL 1
+// #define CEL_REND_BACKEND_OPENGL 1
 #endif
\ No newline at end of file diff --git a/src/maths/maths.h b/src/maths/maths.h index 217f2e0..45d69c1 100644 --- a/src/maths/maths.h +++ b/src/maths/maths.h @@ -10,6 +10,7 @@ #include <math.h> #include <stdio.h> +#include "defines.h" #include "maths_types.h" // --- Helpers diff --git a/src/renderer/archive/old_backend_vulkan.c b/src/renderer/archive/old_backend_vulkan.c index a18ca70..0dfba98 100644 --- a/src/renderer/archive/old_backend_vulkan.c +++ b/src/renderer/archive/old_backend_vulkan.c @@ -1,8 +1,8 @@ -#include "camera.h" -#include "primitives.h" #define CDEBUG -#define CEL_REND_BACKEND_VULKAN 1 +// #define CEL_REND_BACKEND_VULKAN 1 #if CEL_REND_BACKEND_VULKAN +#include "camera.h" +#include "primitives.h" // ^ Temporary #include <assert.h> diff --git a/src/renderer/archive/render_backend.h b/src/renderer/archive/render_backend.h index da30bcc..6df9c81 100644 --- a/src/renderer/archive/render_backend.h +++ b/src/renderer/archive/render_backend.h @@ -22,7 +22,7 @@ void clear_screen(vec3 colour); void texture_data_upload(texture* tex); void bind_texture(shader s, texture* tex, u32 slot); void bind_mesh_vertex_buffer(void* backend, mesh* mesh); -void draw_primitives(cel_primitive_topology primitive, u32 start_index, u32 count); +// void draw_primitives(cel_primitive_topology primitive, u32 start_index, u32 count); shader shader_create_separate(const char* vert_shader, const char* frag_shader); void set_shader(shader s); diff --git a/src/renderer/archive/render_types.h b/src/renderer/archive/render_types.h index f5ea986..5ee3316 100644 --- a/src/renderer/archive/render_types.h +++ b/src/renderer/archive/render_types.h @@ -147,10 +147,10 @@ typedef struct mesh { u32 vbo, vao; /** OpenGL data. TODO: dont leak OpenGL details */ } mesh; -// #ifndef TYPED_MESH_ARRAY -// KITC_DECL_TYPED_ARRAY(mesh) // creates "mesh_darray" -// #define TYPED_MESH_ARRAY -// #endif +#ifndef TYPED_MESH_ARRAY +KITC_DECL_TYPED_ARRAY(mesh) // creates "mesh_darray" +#define TYPED_MESH_ARRAY +#endif typedef struct model { str8 name; diff --git a/src/renderer/backends/backend_opengl.c b/src/renderer/backends/backend_opengl.c deleted file mode 100644 index 4cd97b5..0000000 --- a/src/renderer/backends/backend_opengl.c +++ /dev/null @@ -1,171 +0,0 @@ -#include <stdlib.h> -#include "camera.h" -#define CEL_PLATFORM_LINUX - -#include "defines.h" -#include "file.h" -#include "log.h" -#include "maths_types.h" -#include "ral.h" - -#if CEL_REND_BACKEND_OPENGL - -#include <glad/glad.h> - -#include <glfw3.h> - -/** @brief Internal backend state */ -typedef struct opengl_state { -} opengl_state; - -bool gfx_backend_init(renderer *ren) { - INFO("loading OpenGL backend"); - - // glfwInit(); // Already handled in `renderer_init` - glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); - glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1); - glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); - glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); - - // glad: load all OpenGL function pointers - if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) { - ERROR("Failed to initialise GLAD \n"); - - return false; - } - - glEnable(GL_DEPTH_TEST); - - opengl_state *internal = malloc(sizeof(opengl_state)); - ren->backend_context = (void *)internal; - - return true; -} - -void gfx_backend_draw_frame(renderer *ren, camera *cam, mat4 model, texture *tex) {} - -void gfx_backend_shutdown(renderer *ren) {} - -void uniform_vec3f(u32 program_id, const char *uniform_name, vec3 *value) { - glUniform3fv(glGetUniformLocation(program_id, uniform_name), 1, 
&value->x); -} -void uniform_f32(u32 program_id, const char *uniform_name, f32 value) { - glUniform1f(glGetUniformLocation(program_id, uniform_name), value); -} -void uniform_i32(u32 program_id, const char *uniform_name, i32 value) { - glUniform1i(glGetUniformLocation(program_id, uniform_name), value); -} -void uniform_mat4f(u32 program_id, const char *uniform_name, mat4 *value) { - glUniformMatrix4fv(glGetUniformLocation(program_id, uniform_name), 1, GL_FALSE, value->data); -} - -void clear_screen(vec3 colour) { - glClearColor(colour.x, colour.y, colour.z, 1.0f); - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); -} - -void texture_data_upload(texture *tex) { - printf("Texture name %s\n", tex->name); - TRACE("Upload texture data"); - u32 texture_id; - glGenTextures(1, &texture_id); - glBindTexture(GL_TEXTURE_2D, texture_id); - tex->texture_id = texture_id; - - // set the texture wrapping parameters - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, - GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); - // set texture filtering parameters - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); - - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex->width, tex->height, 0, tex->channel_type, - GL_UNSIGNED_BYTE, tex->image_data); - glGenerateMipmap(GL_TEXTURE_2D); - DEBUG("Freeing texture image data after uploading to GPU"); - // stbi_image_free(tex->image_data); // data is on gpu now so we dont need it around -} - -void bind_texture(shader s, texture *tex, u32 slot) { - // printf("bind texture slot %d with texture id %d \n", slot, tex->texture_id); - glActiveTexture(GL_TEXTURE0 + slot); - glBindTexture(GL_TEXTURE_2D, tex->texture_id); -} - -void bind_mesh_vertex_buffer(void *_backend, mesh *mesh) { glBindVertexArray(mesh->vao); } - -static inline GLenum to_gl_prim_topology(enum cel_primitive_topology primitive) { - switch (primitive) { - case CEL_PRIMITIVE_TOPOLOGY_TRIANGLE: - return GL_TRIANGLES; - case CEL_PRIMITIVE_TOPOLOGY_POINT: - case CEL_PRIMITIVE_TOPOLOGY_LINE: - case CEL_PRIMITIVE_TOPOLOGY_LINE_STRIP: - case CEL_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP: - case CEL_PRIMITIVE_TOPOLOGY_COUNT: - break; - } -} - -void draw_primitives(cel_primitive_topology primitive, u32 start_index, u32 count) { - u32 gl_primitive = to_gl_prim_topology(primitive); - glDrawArrays(gl_primitive, start_index, count); -} - -shader shader_create_separate(const char *vert_shader, const char *frag_shader) { - INFO("Load shaders at %s and %s", vert_shader, frag_shader); - int success; - char info_log[512]; - - u32 vertex = glCreateShader(GL_VERTEX_SHADER); - const char *vertex_shader_src = string_from_file(vert_shader); - if (vertex_shader_src == NULL) { - ERROR("EXIT: couldnt load shader"); - exit(-1); - } - glShaderSource(vertex, 1, &vertex_shader_src, NULL); - glCompileShader(vertex); - glGetShaderiv(vertex, GL_COMPILE_STATUS, &success); - if (!success) { - glGetShaderInfoLog(vertex, 512, NULL, info_log); - printf("%s\n", info_log); - ERROR("EXIT: vertex shader compilation failed"); - exit(-1); - } - - // fragment shader - u32 fragment = glCreateShader(GL_FRAGMENT_SHADER); - const char *fragment_shader_src = string_from_file(frag_shader); - if (fragment_shader_src == NULL) { - ERROR("EXIT: couldnt load shader"); - exit(-1); - } - glShaderSource(fragment, 1, &fragment_shader_src, NULL); - glCompileShader(fragment); - glGetShaderiv(fragment, 
GL_COMPILE_STATUS, &success); - if (!success) { - glGetShaderInfoLog(fragment, 512, NULL, info_log); - printf("%s\n", info_log); - ERROR("EXIT: fragment shader compilation failed"); - exit(-1); - } - - u32 shader_prog; - shader_prog = glCreateProgram(); - - glAttachShader(shader_prog, vertex); - glAttachShader(shader_prog, fragment); - glLinkProgram(shader_prog); - glDeleteShader(vertex); - glDeleteShader(fragment); - free((char *)vertex_shader_src); - free((char *)fragment_shader_src); - - shader s = { .program_id = shader_prog }; - return s; -} - -void set_shader(shader s) { glUseProgram(s.program_id); } - -#endif
\ No newline at end of file diff --git a/src/renderer/backends/backend_vulkan.c b/src/renderer/backends/backend_vulkan.c index 2f8fdf7..8801230 100644 --- a/src/renderer/backends/backend_vulkan.c +++ b/src/renderer/backends/backend_vulkan.c @@ -1,5 +1,10 @@ -#include <assert.h> +#include "defines.h" +#if defined(CEL_REND_BACKEND_VULKAN) + +#define GLFW_INCLUDE_VULKAN #include <glfw3.h> + +#include <assert.h> #include <stddef.h> #include <stdint.h> #include <stdlib.h> @@ -17,7 +22,6 @@ #include "str.h" #include "vulkan_helpers.h" -#include "defines.h" #include "file.h" #include "log.h" #include "ral.h" @@ -401,6 +405,7 @@ VkFormat format_from_vertex_attr(vertex_attrib_type attr) { } gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) { + TRACE("GPU Graphics Pipeline creation"); // Allocate gpu_pipeline_layout* layout = pipeline_layout_pool_alloc(&context.gpu_pools.pipeline_layouts, NULL); @@ -1634,7 +1639,7 @@ texture_handle gpu_texture_create(texture_desc desc, bool create_view, const voi &texture->sampler); if (res != VK_SUCCESS) { ERROR("Error creating texture sampler for image %s", texture->debug_label); - return; + exit(1); } return handle; @@ -1688,23 +1693,13 @@ void vulkan_transition_image_layout(gpu_texture* texture, VkFormat format, VkIma /* TYPED_POOL(gpu_buffer, buffer); */ /* TYPED_POOL(gpu_texture, texture); */ -void resource_pools_init(arena* a, struct resource_pools* res_pools) { - buffer_pool buf_pool = buffer_pool_create(a, MAX_BUFFERS, sizeof(gpu_buffer)); - res_pools->buffers = buf_pool; - texture_pool tex_pool = texture_pool_create(a, MAX_TEXTURES, sizeof(gpu_texture)); - res_pools->textures = tex_pool; +/* void resource_pools_init(arena* a, struct resource_pools* res_pools) { */ +/* buffer_pool buf_pool = buffer_pool_create(a, MAX_BUFFERS, sizeof(gpu_buffer)); */ +/* res_pools->buffers = buf_pool; */ +/* texture_pool tex_pool = texture_pool_create(a, MAX_TEXTURES, sizeof(gpu_texture)); */ +/* res_pools->textures = tex_pool; */ - context.resource_pools = res_pools; -} +/* context.resource_pools = res_pools; */ +/* } */ -void backend_pools_init(arena* a, gpu_backend_pools* backend_pools) { - pipeline_layout_pool pipeline_layout_pool = - pipeline_layout_pool_create(a, MAX_PIPELINES, sizeof(gpu_pipeline_layout)); - backend_pools->pipeline_layouts = pipeline_layout_pool; - pipeline_pool pipeline_pool = pipeline_pool_create(a, MAX_PIPELINES, sizeof(gpu_pipeline)); - backend_pools->pipelines = pipeline_pool; - renderpass_pool rpass_pool = renderpass_pool_create(a, MAX_RENDERPASSES, sizeof(gpu_renderpass)); - backend_pools->renderpasses = rpass_pool; - - context.gpu_pools; -} +#endif diff --git a/src/renderer/backends/backend_vulkan.h b/src/renderer/backends/backend_vulkan.h index dc0f7bd..6ca0bb5 100644 --- a/src/renderer/backends/backend_vulkan.h +++ b/src/renderer/backends/backend_vulkan.h @@ -1,9 +1,10 @@ #pragma once +#include "defines.h" +#if defined(CEL_REND_BACKEND_VULKAN) #include <vulkan/vk_platform.h> #include <vulkan/vulkan.h> #include <vulkan/vulkan_core.h> -#include "defines.h" #include "mem.h" #include "ral.h" #include "ral_types.h" @@ -35,6 +36,7 @@ typedef struct gpu_swapchain { VkSwapchainKHR handle; arena swapchain_arena; VkExtent2D extent; + u32x2 dimensions; VkSurfaceFormatKHR image_format; VkPresentModeKHR present_mode; u32 image_count; @@ -113,3 +115,4 @@ typedef struct gpu_texture { VkSampler sampler; char* debug_label; } gpu_texture; +#endif
\ No newline at end of file diff --git a/src/renderer/backends/metal/backend_metal.h b/src/renderer/backends/metal/backend_metal.h new file mode 100644 index 0000000..9561bb6 --- /dev/null +++ b/src/renderer/backends/metal/backend_metal.h @@ -0,0 +1,74 @@ +#pragma once +// #define CEL_REND_BACKEND_METAL +#if defined(CEL_REND_BACKEND_METAL) + +#include "defines.h" +#include "maths_types.h" +#ifdef __OBJC__ +#import <Foundation/Foundation.h> +#import <Metal/Metal.h> +#import <MetalKit/MetalKit.h> +#import <QuartzCore/CAMetalLayer.h> +#else +typedef void* id; +#endif + +typedef struct gpu_swapchain { + u32x2 dimensions; +#ifdef __OBJC__ + CAMetalLayer* swapchain; +#else + void* swapchain; +#endif +} gpu_swapchain; +typedef struct gpu_device { +/** @brief `device` gives us access to our GPU */ +#ifdef __OBJC__ + id<MTLDevice> id; +#else + void* id; +#endif +} gpu_device; +typedef struct gpu_pipeline_layout { + void* pad; +} gpu_pipeline_layout; +typedef struct gpu_pipeline { +#ifdef __OBJC__ + id<MTLRenderPipelineState> pipeline_state; +#else + void* pipeline_state; +#endif +} gpu_pipeline; +typedef struct gpu_renderpass { +#ifdef __OBJC__ + MTLRenderPassDescriptor* rpass_descriptor; +#else + void* rpass_descriptor; +#endif +} gpu_renderpass; +typedef struct gpu_cmd_encoder { +#ifdef __OBJC__ + id<MTLCommandBuffer> cmd_buffer; + id<MTLRenderCommandEncoder> render_encoder; +#else + void* cmd_buffer; + void* render_encoder; +#endif +} gpu_cmd_encoder; +typedef struct gpu_cmd_buffer { + void* pad; +} gpu_cmd_buffer; + +typedef struct gpu_buffer { +#ifdef __OBJC__ + id<MTLBuffer> id; +#else + void* id; +#endif + u64 size; +} gpu_buffer; +typedef struct gpu_texture { + void* pad; +} gpu_texture; + +#endif
\ No newline at end of file diff --git a/src/renderer/backends/metal/backend_metal.m b/src/renderer/backends/metal/backend_metal.m new file mode 100644 index 0000000..0e9399e --- /dev/null +++ b/src/renderer/backends/metal/backend_metal.m @@ -0,0 +1,285 @@ +#include <assert.h> +#define CEL_REND_BACKEND_METAL +#if defined(CEL_REND_BACKEND_METAL) +#include <stddef.h> +#include "ral_types.h" +#include "colours.h" +#include <stdlib.h> +#include "camera.h" +#include "defines.h" +#include "file.h" +#include "log.h" +#include "maths_types.h" +#include "ral.h" + +#define GLFW_INCLUDE_NONE +#define GLFW_EXPOSE_NATIVE_COCOA + +#include <GLFW/glfw3.h> +#include <GLFW/glfw3native.h> + +#import <Foundation/Foundation.h> +#import <Metal/Metal.h> +#import <MetalKit/MetalKit.h> +#import <QuartzCore/CAMetalLayer.h> +#include "backend_metal.h" + +// --- Handy macros +#define BUFFER_GET(h) (buffer_pool_get(&context.resource_pools->buffers, h)) +#define TEXTURE_GET(h) (texture_pool_get(&context.resource_pools->textures, h)) + +typedef struct metal_context { + GLFWwindow* window; + NSWindow* metal_window; + arena pool_arena; + + gpu_device* device; + gpu_swapchain* swapchain; + id<CAMetalDrawable> surface; + + id<MTLCommandQueue> command_queue; + gpu_cmd_encoder main_command_buf; + gpu_backend_pools gpu_pools; + struct resource_pools* resource_pools; +} metal_context; + +static metal_context context; + +struct GLFWwindow; + +bool gpu_backend_init(const char *window_name, struct GLFWwindow *window) { + INFO("loading Metal backend"); + + memset(&context, 0, sizeof(metal_context)); + context.window = window; + + size_t pool_buffer_size = 1024 * 1024; + context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size); + + backend_pools_init(&context.pool_arena, &context.gpu_pools); + context.resource_pools = malloc(sizeof(struct resource_pools)); + resource_pools_init(&context.pool_arena, context.resource_pools); + + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + + glfwMakeContextCurrent(window); + // FIXME: glfwSetFramebufferSizeCallback(ren->window, framebuffer_size_callback); + + // get a NSWindow pointer from GLFWwindow + NSWindow *nswindow = glfwGetCocoaWindow(window); + context.metal_window = nswindow; + + // const id<MTLCommandQueue> queue = [gpu newCommandQueue]; + // CAMetalLayer *swapchain = [CAMetalLayer layer]; + // swapchain.device = gpu; + // swapchain.opaque = YES; + + // // set swapchain for the window + // nswindow.contentView.layer = swapchain; + // nswindow.contentView.wantsLayer = YES; + + // MTLClearColor color = MTLClearColorMake(0.7, 0.1, 0.2, 1.0); + + // // set all our state properties + // state->device = gpu; + // state->cmd_queue = queue; + // state->swapchain = swapchain; + // state->clear_color = color; + + // NSError *err = 0x0; // TEMPORARY + + // WARN("About to try loading metallib"); + // id<MTLLibrary> defaultLibrary = [state->device newLibraryWithFile: @"build/gfx.metallib" error:&err]; + // CASSERT(defaultLibrary); + // state->default_lib = defaultLibrary; + // if (!state->default_lib) { + // NSLog(@"Failed to load library"); + // exit(0); + // } + + // create_render_pipeline(state); + + return true; +} + +void gpu_backend_shutdown() {} + +bool gpu_device_create(gpu_device* out_device) { + TRACE("GPU Device creation"); + const id<MTLDevice> gpu = MTLCreateSystemDefaultDevice(); + out_device->id = gpu; + context.device = out_device; + + const id<MTLCommandQueue> queue = [gpu newCommandQueue]; + context.command_queue = queue; + + return true; +} +void 
gpu_device_destroy() {} + +// --- Render Pipeline +gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) { + TRACE("GPU Graphics Pipeline creation"); + // Allocate + // gpu_pipeline_layout* layout = + // pipeline_layout_pool_alloc(&context.gpu_pools.pipeline_layouts, NULL); + gpu_pipeline* pipeline = pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL); + + WARN("About to try loading metallib"); + assert(description.vs.is_combined_vert_frag); + // Ignore fragment shader data, as vert shader data contains both + NSError *err = 0x0; // TEMPORARY + NSString *myNSString = [NSString stringWithUTF8String:(char*)description.vs.filepath.buf]; + id<MTLLibrary> default_library = [context.device->id newLibraryWithFile:myNSString error:&err]; + assert(default_library); + + // setup vertex and fragment shaders + id<MTLFunction> ren_vert = [default_library newFunctionWithName:@"basic_vertex"]; + assert(ren_vert); + id<MTLFunction> ren_frag = [default_library newFunctionWithName:@"basic_fragment"]; + assert(ren_frag); + + // create pipeline descriptor + @autoreleasepool { + NSError *err = 0x0; + MTLRenderPipelineDescriptor *pld = [[MTLRenderPipelineDescriptor alloc] init]; + NSString *pipeline_name = [NSString stringWithUTF8String: description.debug_name]; + pld.label = pipeline_name; + pld.vertexFunction = ren_vert; + pld.fragmentFunction = ren_frag; + pld.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm; + pld.colorAttachments[0].blendingEnabled = YES; + + MTLDepthStencilDescriptor *depthStencilDescriptor = [MTLDepthStencilDescriptor new]; + depthStencilDescriptor.depthCompareFunction = MTLCompareFunctionLess; + depthStencilDescriptor.depthWriteEnabled = YES; + pld.depthAttachmentPixelFormat = MTLPixelFormatDepth32Float_Stencil8; + + id<MTLDepthStencilState> depth_descriptor = [context.device->id newDepthStencilStateWithDescriptor:depthStencilDescriptor]; + // FIXME: state->depth_state = depth_descriptor; + + id<MTLRenderPipelineState> pipeline_state = [context.device->id newRenderPipelineStateWithDescriptor:pld error:&err]; + TRACE("created renderpipelinestate"); + pipeline->pipeline_state = pipeline_state; + + } + + return pipeline; +} +void gpu_pipeline_destroy(gpu_pipeline* pipeline) {} + +// --- Renderpass +gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) { + gpu_renderpass* renderpass = renderpass_pool_alloc(&context.gpu_pools.renderpasses, NULL); + + // TODO: Configure based on description + // set up render pass + context.surface = [context.swapchain->swapchain nextDrawable]; + MTLRenderPassDescriptor *renderPassDescriptor = [[MTLRenderPassDescriptor alloc] init]; + MTLRenderPassColorAttachmentDescriptor *cd = renderPassDescriptor.colorAttachments[0]; + [cd setTexture:context.surface.texture]; + [cd setLoadAction:MTLLoadActionClear]; + MTLClearColor clearColor = MTLClearColorMake(0.1, 0.1, 0.0, 1.0); + [cd setClearColor:clearColor]; + [cd setStoreAction:MTLStoreActionStore]; + + renderpass->rpass_descriptor = renderPassDescriptor; + + return renderpass; +} + +void gpu_renderpass_destroy(gpu_renderpass* pass) {} + +// --- Swapchain +bool gpu_swapchain_create(gpu_swapchain* out_swapchain) { + TRACE("GPU Swapchain creation"); + CAMetalLayer *swapchain = [CAMetalLayer layer]; + swapchain.device = context.device->id; + swapchain.opaque = YES; + out_swapchain->swapchain = swapchain; + + // set swapchain for the window + context.metal_window.contentView.layer = swapchain; + context.metal_window.contentView.wantsLayer = YES; + + 
context.swapchain = out_swapchain; + return true; +} +void gpu_swapchain_destroy(gpu_swapchain* swapchain) {} + +// --- Command buffer +gpu_cmd_encoder gpu_cmd_encoder_create() { + id <MTLCommandBuffer> cmd_buffer = [context.command_queue commandBuffer]; + + return (gpu_cmd_encoder) { + .cmd_buffer = cmd_buffer + }; +} +void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder) {} +void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder) { /* no-op */ } +void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass) { + DEBUG("Create Render Command Encoder"); + id<MTLRenderCommandEncoder> render_encoder = [encoder->cmd_buffer renderCommandEncoderWithDescriptor:renderpass->rpass_descriptor]; + encoder->render_encoder = render_encoder; + // [encoder setDepthStencilState:state->depth_state]; +} +void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder) {} +void gpu_cmd_encoder_begin_compute() {} +gpu_cmd_encoder* gpu_get_default_cmd_encoder() { + return &context.main_command_buf; +} + +/** @brief Finish recording and return a command buffer that can be submitted to a queue */ +gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder) {} + +void gpu_queue_submit(gpu_cmd_buffer* buffer) {} + +void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset, + buffer_handle dst, u64 dst_offset, u64 copy_size); +void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size); + +void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst, + u64 dst_offset, u64 copy_size); +void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst); + +void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline) {} +void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data) {} +void encode_set_default_settings(gpu_cmd_encoder* encoder) { + [encoder->render_encoder setCullMode:MTLCullModeBack]; +} +void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) { + gpu_buffer* vertex_buf = BUFFER_GET(buf); + [encoder->render_encoder setVertexBuffer:vertex_buf->id offset:0 atIndex:0]; +} +void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {} +void encode_set_bind_group() {} +void encode_draw(gpu_cmd_encoder* encoder) {} +void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) {} +void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {} + +buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags, + const void* data) { + buffer_handle handle; + gpu_buffer* buffer = buffer_pool_alloc(&context.resource_pools->buffers, &handle); + buffer->size = size; + + id<MTLBuffer> mtl_vert_buf = [context.device->id newBufferWithBytes:data + length: size + options:MTLResourceStorageModeShared]; + return handle; +} +void gpu_buffer_destroy(buffer_handle buffer) {} +void gpu_buffer_upload(const void* data) {} + +texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {} +void gpu_texture_destroy(texture_handle) {} +void gpu_texture_upload(texture_handle texture, const void* data) {} + +bool gpu_backend_begin_frame() { + context.main_command_buf.cmd_buffer = [context.command_queue commandBuffer]; + return true; + } +void gpu_backend_end_frame() {} +void gpu_temp_draw(size_t n_verts) {} + +#endif
\ No newline at end of file diff --git a/src/renderer/backends/opengl/backend_opengl.c b/src/renderer/backends/opengl/backend_opengl.c new file mode 100644 index 0000000..17587a2 --- /dev/null +++ b/src/renderer/backends/opengl/backend_opengl.c @@ -0,0 +1,391 @@ +#include <stddef.h> +#include <stdio.h> +#include "colours.h" +#include "opengl_helpers.h" +#include "ral_types.h" +#if defined(CEL_REND_BACKEND_OPENGL) +#include <assert.h> +#include <stdlib.h> + +#include "backend_opengl.h" +#include "defines.h" +#include "file.h" +#include "log.h" +#include "maths_types.h" +#include "ral.h" + +#include <glad/glad.h> +#include <glfw3.h> + +typedef struct opengl_context { + GLFWwindow* window; + arena pool_arena; + gpu_cmd_encoder command_buffer; + gpu_backend_pools gpu_pools; + struct resource_pools* resource_pools; +} opengl_context; + +static opengl_context context; + +struct GLFWwindow; + +bool gpu_backend_init(const char* window_name, struct GLFWwindow* window) { + INFO("loading OpenGL backend"); + + memset(&context, 0, sizeof(opengl_context)); + context.window = window; + + size_t pool_buffer_size = 1024 * 1024; + context.pool_arena = arena_create(malloc(pool_buffer_size), pool_buffer_size); + + backend_pools_init(&context.pool_arena, &context.gpu_pools); + context.resource_pools = malloc(sizeof(struct resource_pools)); + resource_pools_init(&context.pool_arena, context.resource_pools); + + glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); + glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3); + glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); + glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); + + // glad: load all opengl function pointers + if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) { + ERROR("Failed to initialise GLAD \n"); + return false; + } + + glEnable(GL_DEPTH_TEST); + // glFrontFace(GL_CW); + + return true; +} + +void gpu_backend_shutdown() {} + +bool gpu_device_create(gpu_device* out_device) { /* No-op in OpenGL */ } +void gpu_device_destroy() { /* No-op in OpenGL */ } + +// --- Render Pipeline +gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description) { + gpu_pipeline* pipeline = pipeline_pool_alloc(&context.gpu_pools.pipelines, NULL); + + // Create shader program + u32 shader_id = shader_create_separate(description.vs.filepath.buf, description.fs.filepath.buf); + pipeline->shader_id = shader_id; + + // Vertex format + pipeline->vertex_desc = description.vertex_desc; + + // Allocate uniform buffers if needed + printf("data layouts %d\n", description.data_layouts_count); + for (u32 layout_i = 0; layout_i < description.data_layouts_count; layout_i++) { + shader_data_layout sdl = description.data_layouts[layout_i].shader_data_get_layout(NULL); + TRACE("Got shader data layout %d's bindings! . 
found %d", layout_i, sdl.bindings_count); + + for (u32 binding_j = 0; binding_j < sdl.bindings_count; binding_j++) { + u32 binding_id = binding_j; + assert(binding_id < MAX_PIPELINE_UNIFORM_BUFFERS); + shader_binding binding = sdl.bindings[binding_j]; + if (binding.type == SHADER_BINDING_BYTES) { + buffer_handle ubo_handle = + gpu_buffer_create(binding.data.bytes.size, CEL_BUFFER_UNIFORM, CEL_BUFFER_FLAG_GPU, + NULL); // no data right now + pipeline->uniform_bindings[binding_id] = ubo_handle; + gpu_buffer* ubo_buf = BUFFER_GET(ubo_handle); + + u32 blockIndex = glGetUniformBlockIndex(pipeline->shader_id, "Matrices"); + printf("Block index for Matrices: %d", blockIndex); + u32 blocksize; + glGetActiveUniformBlockiv(pipeline->shader_id, blockIndex, GL_UNIFORM_BLOCK_DATA_SIZE, + &blocksize); + printf("\t with size %d bytes\n", blocksize); + + glBindBuffer(GL_UNIFORM_BUFFER, ubo_buf->id.ubo); + glBindBufferBase(GL_UNIFORM_BUFFER, binding_j, ubo_buf->id.ubo); + if (blockIndex != GL_INVALID_INDEX) { + printf("Here\n"); + glUniformBlockBinding(pipeline->shader_id, blockIndex, 0); + } + + // Now we want to store a handle associated with the shader for this + } + } + } + + return pipeline; +} +void gpu_pipeline_destroy(gpu_pipeline* pipeline) {} + +// --- Renderpass +gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description) {} +void gpu_renderpass_destroy(gpu_renderpass* pass) {} + +// --- Swapchain +bool gpu_swapchain_create(gpu_swapchain* out_swapchain) {} +void gpu_swapchain_destroy(gpu_swapchain* swapchain) {} + +// --- Command buffer +gpu_cmd_encoder gpu_cmd_encoder_create() { + gpu_cmd_encoder encoder = { 0 }; + return encoder; +} +void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder) {} +void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder) {} +void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass) { + rgba clear_colour = STONE_900; + glClearColor(clear_colour.r, clear_colour.g, clear_colour.b, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); +} +void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder) {} +void gpu_cmd_encoder_begin_compute() {} +gpu_cmd_encoder* gpu_get_default_cmd_encoder() { return &context.command_buffer; } + +/** @brief Finish recording and return a command buffer that can be submitted to a queue */ +gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder) {} + +void gpu_queue_submit(gpu_cmd_buffer* buffer) {} + +// --- Data copy commands +/** @brief Copy data from one buffer to another */ +void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset, + buffer_handle dst, u64 dst_offset, u64 copy_size) {} +/** @brief Upload CPU-side data as array of bytes to a GPU buffer */ +void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size) { + // TODO: finish implementing this + gpu_buffer* buf = BUFFER_GET(gpu_buf); +} + +/** @brief Copy data from buffer to buffer using a one time submit command buffer and a wait */ +void copy_buffer_to_buffer_oneshot(buffer_handle src, u64 src_offset, buffer_handle dst, + u64 dst_offset, u64 copy_size) {} +/** @brief Copy data from buffer to an image using a one time submit command buffer */ +void copy_buffer_to_image_oneshot(buffer_handle src, texture_handle dst) {} + +// --- Render commands +void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline) { + encoder->pipeline = pipeline; + // In OpenGL binding a pipeline is more or less equivalent to just setting the shader + 
glUseProgram(pipeline->shader_id); +} +void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data) { + shader_data_layout sdl = data->shader_data_get_layout(data->data); + + for (u32 i = 0; i < sdl.bindings_count; i++) { + shader_binding binding = sdl.bindings[i]; + /* print_shader_binding(binding); */ + + if (binding.type == SHADER_BINDING_BYTES) { + buffer_handle b = encoder->pipeline->uniform_bindings[i]; + gpu_buffer* ubo_buf = BUFFER_GET(b); + glBindBuffer(GL_UNIFORM_BUFFER, ubo_buf->id.ubo); + glBufferSubData(GL_UNIFORM_BUFFER, 0, ubo_buf->size, data->data); + /* printf("Size %d\n", ubo_buf->size); */ + + /* glBindBuffer(GL_UNIFORM_BUFFER, 0); */ + } + } +} +void encode_set_default_settings(gpu_cmd_encoder* encoder) {} +void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) { + gpu_buffer* buffer = BUFFER_GET(buf); + if (buffer->vao == 0) { // if no VAO for this vertex buffer, create it + INFO("Setting up VAO"); + buffer->vao = opengl_bindcreate_vao(buffer, encoder->pipeline->vertex_desc); + } + glBindVertexArray(buffer->vao); +} +void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) { + gpu_buffer* buffer = BUFFER_GET(buf); + glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer->id.ibo); +} +void encode_draw(gpu_cmd_encoder* encoder, u64 count) { glDrawArrays(GL_TRIANGLES, 0, count); } +void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) { + glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, 0); +} +void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf) {} + +// --- Buffers +buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags, + const void* data) { + // "allocating" the cpu-side buffer struct + buffer_handle handle; + gpu_buffer* buffer = buffer_pool_alloc(&context.resource_pools->buffers, &handle); + buffer->size = size; + buffer->vao = 0; // When we create a new buffer, there will be no VAO. 
+ + // Opengl buffer + GLuint gl_buffer_id; + glGenBuffers(1, &gl_buffer_id); + + GLenum gl_buf_type; + GLenum gl_buf_usage = GL_STATIC_DRAW; + + switch (buf_type) { + case CEL_BUFFER_UNIFORM: + DEBUG("Creating Uniform buffer"); + gl_buf_type = GL_UNIFORM_BUFFER; + /* gl_buf_usage = GL_DYNAMIC_DRAW; */ + buffer->id.ubo = gl_buffer_id; + break; + case CEL_BUFFER_DEFAULT: + case CEL_BUFFER_VERTEX: + DEBUG("Creating Vertex buffer"); + gl_buf_type = GL_ARRAY_BUFFER; + buffer->id.vbo = gl_buffer_id; + break; + case CEL_BUFFER_INDEX: + DEBUG("Creating Index buffer"); + gl_buf_type = GL_ELEMENT_ARRAY_BUFFER; + buffer->id.ibo = gl_buffer_id; + break; + default: + WARN("Unimplemented gpu_buffer_type provided %s", buffer_type_names[buf_type]); + break; + } + // bind buffer + glBindBuffer(gl_buf_type, gl_buffer_id); + + if (data) { + TRACE("Upload data (%d bytes) as part of buffer creation", size); + glBufferData(gl_buf_type, buffer->size, data, gl_buf_usage); + } else { + TRACE("Allocating the correct size anyway"); + glBufferData(gl_buf_type, buffer->size, NULL, gl_buf_usage); + } + + glBindBuffer(gl_buf_type, 0); + + return handle; +} + +void gpu_buffer_destroy(buffer_handle buffer) {} +void gpu_buffer_upload(const void* data) {} + +texture_handle gpu_texture_create(texture_desc desc, bool create_view, const void* data) {} +void gpu_texture_destroy(texture_handle) {} +void gpu_texture_upload(texture_handle texture, const void* data) {} + +// --- Vertex formats +bytebuffer vertices_as_bytebuffer(arena* a, vertex_format format, vertex_darray* vertices) {} + +// --- TEMP +bool gpu_backend_begin_frame() { return true; } +void gpu_backend_end_frame() { + // TODO: Reset all bindings + glfwSwapBuffers(context.window); +} +void gpu_temp_draw(size_t n_verts) {} + +u32 shader_create_separate(const char* vert_shader, const char* frag_shader) { + INFO("Load shaders at %s and %s", vert_shader, frag_shader); + int success; + char info_log[512]; + + u32 vertex = glCreateShader(GL_VERTEX_SHADER); + const char* vertex_shader_src = string_from_file(vert_shader); + if (vertex_shader_src == NULL) { + ERROR("EXIT: couldnt load shader"); + exit(-1); + } + glShaderSource(vertex, 1, &vertex_shader_src, NULL); + glCompileShader(vertex); + glGetShaderiv(vertex, GL_COMPILE_STATUS, &success); + if (!success) { + glGetShaderInfoLog(vertex, 512, NULL, info_log); + printf("%s\n", info_log); + ERROR("EXIT: vertex shader compilation failed"); + exit(-1); + } + + // fragment shader + u32 fragment = glCreateShader(GL_FRAGMENT_SHADER); + const char* fragment_shader_src = string_from_file(frag_shader); + if (fragment_shader_src == NULL) { + ERROR("EXIT: couldnt load shader"); + exit(-1); + } + glShaderSource(fragment, 1, &fragment_shader_src, NULL); + glCompileShader(fragment); + glGetShaderiv(fragment, GL_COMPILE_STATUS, &success); + if (!success) { + glGetShaderInfoLog(fragment, 512, NULL, info_log); + printf("%s\n", info_log); + ERROR("EXIT: fragment shader compilation failed"); + exit(-1); + } + + u32 shader_prog; + shader_prog = glCreateProgram(); + + glAttachShader(shader_prog, vertex); + glAttachShader(shader_prog, fragment); + glLinkProgram(shader_prog); + glDeleteShader(vertex); + glDeleteShader(fragment); + free((char*)vertex_shader_src); + free((char*)fragment_shader_src); + + return shader_prog; +} + +inline void uniform_vec3f(u32 program_id, const char* uniform_name, vec3* value) { + glUniform3fv(glGetUniformLocation(program_id, uniform_name), 1, &value->x); +} +inline void uniform_f32(u32 program_id, const char* 
uniform_name, f32 value) { + glUniform1f(glGetUniformLocation(program_id, uniform_name), value); +} +inline void uniform_i32(u32 program_id, const char* uniform_name, i32 value) { + glUniform1i(glGetUniformLocation(program_id, uniform_name), value); +} +inline void uniform_mat4f(u32 program_id, const char* uniform_name, mat4* value) { + glUniformMatrix4fv(glGetUniformLocation(program_id, uniform_name), 1, GL_FALSE, value->data); +} + +// void clear_screen(vec3 colour) { +// glClearColor(colour.x, colour.y, colour.z, 1.0f); +// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); +// } + +// void texture_data_upload(texture *tex) { +// printf("Texture name %s\n", tex->name); +// TRACE("Upload texture data"); +// u32 texture_id; +// glGenTextures(1, &texture_id); +// glBindTexture(GL_TEXTURE_2D, texture_id); +// tex->texture_id = texture_id; + +// // set the texture wrapping parameters +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, +// GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method) +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); +// // set texture filtering parameters +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); +// glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + +// glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex->width, tex->height, 0, tex->channel_type, +// GL_UNSIGNED_BYTE, tex->image_data); +// glGenerateMipmap(GL_TEXTURE_2D); +// DEBUG("Freeing texture image data after uploading to GPU"); +// // stbi_image_free(tex->image_data); // data is on gpu now so we dont need it around +// } + +// void bind_texture(shader s, texture *tex, u32 slot) { +// // printf("bind texture slot %d with texture id %d \n", slot, tex->texture_id); +// glActiveTexture(GL_TEXTURE0 + slot); +// glBindTexture(GL_TEXTURE_2D, tex->texture_id); +// } + +// void bind_mesh_vertex_buffer(void *_backend, mesh *mesh) { glBindVertexArray(mesh->vao); } + +// static inline GLenum to_gl_prim_topology(enum cel_primitive_topology primitive) { +// switch (primitive) { +// case CEL_PRIMITIVE_TOPOLOGY_TRIANGLE: +// return GL_TRIANGLES; +// case CEL_PRIMITIVE_TOPOLOGY_POINT: +// case CEL_PRIMITIVE_TOPOLOGY_LINE: +// case CEL_PRIMITIVE_TOPOLOGY_LINE_STRIP: +// case CEL_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP: +// case CEL_PRIMITIVE_TOPOLOGY_COUNT: +// break; +// } +// } +#endif diff --git a/src/renderer/backends/opengl/backend_opengl.h b/src/renderer/backends/opengl/backend_opengl.h new file mode 100644 index 0000000..91de38d --- /dev/null +++ b/src/renderer/backends/opengl/backend_opengl.h @@ -0,0 +1,53 @@ +#pragma once + +#ifdef CEL_REND_BACKEND_OPENGL + +#include "defines.h" +#include "maths_types.h" +#include "ral_types.h" + +#define MAX_PIPELINE_UNIFORM_BUFFERS 32 + +typedef struct gpu_swapchain { + u32x2 dimensions; +} gpu_swapchain; +typedef struct gpu_device { +} gpu_device; +typedef struct gpu_pipeline_layout { + void *pad +} gpu_pipeline_layout; +typedef struct gpu_pipeline { + u32 shader_id; + vertex_description vertex_desc; + buffer_handle uniform_bindings[MAX_PIPELINE_UNIFORM_BUFFERS]; +} gpu_pipeline; +typedef struct gpu_renderpass { + void *pad +} gpu_renderpass; +typedef struct gpu_cmd_encoder { + gpu_pipeline *pipeline; +} gpu_cmd_encoder; // Recording +typedef struct gpu_cmd_buffer { + void *pad +} gpu_cmd_buffer; // Ready for submission + +typedef struct gpu_buffer { + union { + u32 vbo; + u32 ibo; + u32 ubo; + } id; + u32 vao; // Optional + u64 size; +} gpu_buffer; +typedef struct gpu_texture { + void *pad +} 
gpu_texture; + +u32 shader_create_separate(const char *vert_shader, const char *frag_shader); + +void uniform_vec3f(u32 program_id, const char *uniform_name, vec3 *value); +void uniform_f32(u32 program_id, const char *uniform_name, f32 value); +void uniform_i32(u32 program_id, const char *uniform_name, i32 value); +void uniform_mat4f(u32 program_id, const char *uniform_name, mat4 *value); +#endif diff --git a/src/renderer/backends/opengl/opengl_helpers.h b/src/renderer/backends/opengl/opengl_helpers.h new file mode 100644 index 0000000..a3c4014 --- /dev/null +++ b/src/renderer/backends/opengl/opengl_helpers.h @@ -0,0 +1,73 @@ +#if defined(CEL_REND_BACKEND_OPENGL) +#pragma once +#include "backend_opengl.h" +#include "log.h" +#include "ral.h" +#include "ral_types.h" + +#include <glad/glad.h> +#include <glfw3.h> +#include "ral_types.h" +typedef struct opengl_vertex_attr { + u32 count; + GLenum data_type; +} opengl_vertex_attr; + +static opengl_vertex_attr format_from_vertex_attr(vertex_attrib_type attr) { + switch (attr) { + case ATTR_F32: + return (opengl_vertex_attr){ .count = 1, .data_type = GL_FLOAT }; + case ATTR_U32: + return (opengl_vertex_attr){ .count = 1, .data_type = GL_UNSIGNED_INT }; + case ATTR_I32: + return (opengl_vertex_attr){ .count = 1, .data_type = GL_INT }; + case ATTR_F32x2: + return (opengl_vertex_attr){ .count = 2, .data_type = GL_FLOAT }; + case ATTR_U32x2: + // return VK_FORMAT_R32G32_UINT; + case ATTR_I32x2: + // return VK_FORMAT_R32G32_UINT; + case ATTR_F32x3: + return (opengl_vertex_attr){ .count = 3, .data_type = GL_FLOAT }; + case ATTR_U32x3: + // return VK_FORMAT_R32G32B32_UINT; + case ATTR_I32x3: + // return VK_FORMAT_R32G32B32_SINT; + case ATTR_F32x4: + return (opengl_vertex_attr){ .count = 4, .data_type = GL_FLOAT }; + case ATTR_U32x4: + // return VK_FORMAT_R32G32B32A32_UINT; + case ATTR_I32x4: + return (opengl_vertex_attr){ .count = 4, .data_type = GL_INT }; + } +} + +static u32 opengl_bindcreate_vao(gpu_buffer* buf, vertex_description desc) { + // 1. Bind the buffer + glBindBuffer(GL_ARRAY_BUFFER, buf->id.vbo); + // 2. Create new VAO + u32 vao; + glGenVertexArrays(1, &vao); + glBindVertexArray(vao); + + // Attributes + u32 attr_count = desc.attributes_count; + printf("N attributes %d\n", attr_count); + u64 offset = 0; + size_t vertex_size = desc.use_full_vertex_size ? 
sizeof(vertex) : desc.stride; + for (u32 i = 0; i < desc.attributes_count; i++) { + opengl_vertex_attr format = format_from_vertex_attr(desc.attributes[i]); + glVertexAttribPointer(i, format.count, format.data_type, GL_FALSE, vertex_size, (void*)offset); + TRACE(" %d %d %d %d %d %s", i, format.count, format.data_type, vertex_size, offset, + desc.attr_names[i]); + glEnableVertexAttribArray(i); // nth index + size_t this_offset = vertex_attrib_size(desc.attributes[i]); + printf("offset total %lld this attr %ld\n", offset, this_offset); + offset += this_offset; + } + glBindBuffer(GL_ARRAY_BUFFER, 0); + + return vao; +} + +#endif diff --git a/src/renderer/ral.c b/src/renderer/ral.c index 7d868be..123c932 100644 --- a/src/renderer/ral.c +++ b/src/renderer/ral.c @@ -1,5 +1,13 @@ #include "ral.h" +#if defined(CEL_REND_BACKEND_VULKAN) +#include "backend_vulkan.h" +#elif defined(CEL_REND_BACKEND_METAL) +#include "backend_metal.h" +#elif defined(CEL_REND_BACKEND_OPENGL) +#include "backend_opengl.h" +#endif + size_t vertex_attrib_size(vertex_attrib_type attr) { switch (attr) { case ATTR_F32: @@ -40,3 +48,29 @@ vertex_description static_3d_vertex_description() { vertex_desc_add(&builder, "texCoords", ATTR_F32x2); return builder; } + +void backend_pools_init(arena* a, gpu_backend_pools* backend_pools) { + pipeline_layout_pool pipeline_layout_pool = + pipeline_layout_pool_create(a, MAX_PIPELINES, sizeof(gpu_pipeline_layout)); + backend_pools->pipeline_layouts = pipeline_layout_pool; + pipeline_pool pipeline_pool = pipeline_pool_create(a, MAX_PIPELINES, sizeof(gpu_pipeline)); + backend_pools->pipelines = pipeline_pool; + renderpass_pool rpass_pool = renderpass_pool_create(a, MAX_RENDERPASSES, sizeof(gpu_renderpass)); + backend_pools->renderpasses = rpass_pool; + + // context.gpu_pools; +} + +void resource_pools_init(arena* a, struct resource_pools* res_pools) { + buffer_pool buf_pool = buffer_pool_create(a, MAX_BUFFERS, sizeof(gpu_buffer)); + res_pools->buffers = buf_pool; + texture_pool tex_pool = texture_pool_create(a, MAX_TEXTURES, sizeof(gpu_texture)); + res_pools->textures = tex_pool; + + // context.resource_pools = res_pools; +} + +void print_shader_binding(shader_binding b) { + printf("Binding name: %s type %s vis %d stores data %d\n", b.label, + shader_binding_type_name[b.type], b.vis, b.stores_data); +} diff --git a/src/renderer/ral.h b/src/renderer/ral.h index 1ca37b4..067847b 100644 --- a/src/renderer/ral.h +++ b/src/renderer/ral.h @@ -44,17 +44,23 @@ TYPED_POOL(gpu_pipeline_layout, pipeline_layout); TYPED_POOL(gpu_pipeline, pipeline); TYPED_POOL(gpu_renderpass, renderpass); +// --- Handy macros +#define BUFFER_GET(h) (buffer_pool_get(&context.resource_pools->buffers, h)) +#define TEXTURE_GET(h) (texture_pool_get(&context.resource_pools->textures, h)) + // --- Pools typedef struct gpu_backend_pools { pipeline_pool pipelines; pipeline_layout_pool pipeline_layouts; renderpass_pool renderpasses; } gpu_backend_pools; +void backend_pools_init(arena* a, gpu_backend_pools* backend_pools); struct resource_pools { buffer_pool buffers; texture_pool textures; }; +void resource_pools_init(arena* a, struct resource_pools* res_pools); // --- Pipeline description typedef enum pipeline_kind { @@ -64,9 +70,10 @@ typedef enum pipeline_kind { typedef struct shader_desc { const char* debug_name; - str8 filepath; // where it came from + str8 filepath; // Where it came from str8 code; // Either GLSL or SPIRV bytecode bool is_spirv; + bool is_combined_vert_frag; // Contains both vertex and fragment stages } 
shader_desc; struct graphics_pipeline_desc { @@ -146,7 +153,7 @@ void encode_set_default_settings(gpu_cmd_encoder* encoder); void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf); void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf); void encode_set_bind_group(); // TODO -void encode_draw(gpu_cmd_encoder* encoder); +void encode_draw(gpu_cmd_encoder* encoder, u64 count); void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count); void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf); @@ -179,3 +186,4 @@ void gpu_temp_draw(size_t n_verts); // --- Helpers vertex_description static_3d_vertex_description(); +size_t vertex_attrib_size(vertex_attrib_type attr); diff --git a/src/renderer/ral_types.h b/src/renderer/ral_types.h index 704f2cb..f1f7809 100644 --- a/src/renderer/ral_types.h +++ b/src/renderer/ral_types.h @@ -77,6 +77,11 @@ typedef enum gpu_buffer_type { CEL_BUFFER_COUNT } gpu_buffer_type; +static const char* buffer_type_names[] = { + "RAL Buffer Default", "RAL Buffer Vertex", "RAL Buffer Index", + "RAL Buffer Uniform", "RAL Buffer Count", +}; + typedef enum gpu_buffer_flag { CEL_BUFFER_FLAG_CPU = 1 << 0, CEL_BUFFER_FLAG_GPU = 1 << 1, @@ -196,6 +201,10 @@ typedef enum shader_binding_type { SHADER_BINDING_COUNT } shader_binding_type; +static const char* shader_binding_type_name[] = { "BUFFER", "BUFFER ARRAY", "TEXTURE", + "TEXTURE ARRAY", "SAMPLER", "BYTES", + "COUNT" }; + // pub trait ShaderBindable: Clone + Copy { // fn bind_to(&self, context: &mut PipelineContext, index: u32); // } @@ -223,6 +232,8 @@ typedef struct shader_binding { #define MAX_LAYOUT_BINDINGS 8 +void print_shader_binding(shader_binding b); + /** @brief A list of bindings that describe what data a shader / pipeline expects @note This roughly correlates to a descriptor set layout in Vulkan */ diff --git a/src/renderer/render.c b/src/renderer/render.c index 5723c9e..7833ac9 100644 --- a/src/renderer/render.c +++ b/src/renderer/render.c @@ -161,6 +161,7 @@ void draw_mesh(mesh* mesh, mat4* model) { // , mat4* view, mat4* proj) { } // Assume this has already been done /* encode_bind_shader_data(enc, 0, &mvp_uniforms_data); */ + encode_draw_indexed(enc, mesh->index_count); } diff --git a/src/renderer/render_types.h b/src/renderer/render_types.h index cc5fd93..349f65a 100644 --- a/src/renderer/render_types.h +++ b/src/renderer/render_types.h @@ -10,12 +10,19 @@ */ #pragma once +#include "defines.h" #include "ral.h" #include "ral_types.h" #if defined(CEL_PLATFORM_WINDOWS) // #include "backend_dx11.h" #endif +#if defined(CEL_REND_BACKEND_VULKAN) #include "backend_vulkan.h" +#elif defined(CEL_REND_BACKEND_METAL) +#include "backend_metal.h" +#elif defined(CEL_REND_BACKEND_OPENGL) +#include "backend_opengl.h" +#endif struct GLFWwindow; diff --git a/src/resources/gltf.c b/src/resources/gltf.c index 022bf95..61b99d7 100644 --- a/src/resources/gltf.c +++ b/src/resources/gltf.c @@ -47,7 +47,8 @@ model_handle model_load_gltf(struct core *core, const char *path, bool invert_te model model = { 0 }; model.name = str8_cstr_view(path); - model.meshes = mesh_darray_new(1); + // FIXME: Use mesh* malloc'd + /* model.meshes = mesh_darray_new(1); */ // model.materials = material_darray_new(1); bool success = @@ -771,4 +772,4 @@ bool model_load_gltf(const char *path, model *out_model) { TRACE("Finished loading GLTF"); return true; } -*/
\ No newline at end of file +*/ diff --git a/src/resources/obj.c b/src/resources/obj.c index 888e16e..19d8657 100644 --- a/src/resources/obj.c +++ b/src/resources/obj.c @@ -54,7 +54,7 @@ model_handle model_load_obj(core *core, const char *path, bool invert_textures_y model model = { 0 }; model.name = str8_cstr_view(path); - model.meshes = mesh_darray_new(1); + /* model.meshes = mallocmesh_darray_new(1); */ // model.materials = material_darray_new(1); bool success = model_load_obj_str(file_string, relative_path.path, &model, invert_textures_y); @@ -228,6 +228,8 @@ bool model_load_obj_str(const char *file_string, str8 relative_path, model *out_ // // TODO: bounding box calculation for each mesh // // TODO: bounding box calculation for model + // TODO: copy from mesh_darray to malloc'd mesh* array + return true; } diff --git a/src/std/mem.c b/src/std/mem.c index a5321fb..ede1db4 100644 --- a/src/std/mem.c +++ b/src/std/mem.c @@ -46,15 +46,18 @@ void arena_rewind(arena_save savepoint) { savepoint.arena->curr = savepoint.save // --- Pool -void_pool void_pool_create(arena* a, u64 capacity, u64 entry_size) { +void_pool void_pool_create(arena* a, const char* debug_label, u64 capacity, u64 entry_size) { size_t memory_requirements = capacity * entry_size; void* backing_buf = arena_alloc(a, memory_requirements); + assert(entry_size >= sizeof(void_pool_header)); // TODO: create my own assert with error message + void_pool pool = { .capacity = capacity, .entry_size = entry_size, .count = 0, .backing_buffer = backing_buf, - .free_list_head = NULL }; + .free_list_head = NULL, + .debug_label = debug_label }; void_pool_free_all(&pool); @@ -99,7 +102,7 @@ void* void_pool_alloc(void_pool* pool, u32* out_raw_handle) { uintptr_t start = (uintptr_t)pool->backing_buffer; uintptr_t cur = (uintptr_t)free_node; TRACE("%ld %ld ", start, cur); - /* assert(cur > start); */ + assert(cur > start); u32 index = (u32)((cur - start) / pool->entry_size); printf("Index %d\n", index); if (out_raw_handle != NULL) { diff --git a/src/std/mem.h b/src/std/mem.h index 1d508ce..6e76763 100644 --- a/src/std/mem.h +++ b/src/std/mem.h @@ -48,9 +48,10 @@ typedef struct void_pool { u64 count; void* backing_buffer; void_pool_header* free_list_head; + const char* debug_label; } void_pool; -void_pool void_pool_create(arena* a, u64 capacity, u64 entry_size); +void_pool void_pool_create(arena* a, const char* debug_label, u64 capacity, u64 entry_size); void void_pool_free_all(void_pool* pool); bool void_pool_is_empty(void_pool* pool); bool void_pool_is_full(void_pool* pool); @@ -68,7 +69,7 @@ void void_pool_dealloc(void_pool* pool, u32 raw_handle); } Name##_pool; \ \ static Name##_pool Name##_pool_create(arena* a, u64 cap, u64 entry_size) { \ - void_pool p = void_pool_create(a, cap, entry_size); \ + void_pool p = void_pool_create(a, "\"" #Name "\"", cap, entry_size); \ return (Name##_pool){ .inner = p }; \ } \ static inline T* Name##_pool_get(Name##_pool* pool, Name##_handle handle) { \ |
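The `src/std/mem.*` changes above give every `void_pool` a `debug_label`, and the `TYPED_POOL` macro now forwards the quoted type name automatically. A minimal usage sketch under the new signature follows; the `particle` type, the arena sizing, and the assumption that `f32`/`u32` come from `defines.h` are illustrative, not part of this diff.

```c
#include <stdlib.h>
#include "mem.h"  // void_pool_create now takes a debug_label argument

// Hypothetical pooled type for illustration only.
typedef struct particle {
  f32 x, y, z, w;
} particle;

int main(void) {
  size_t backing_size = 1024 * 1024;
  arena a = arena_create(malloc(backing_size), backing_size);

  // New signature: (arena, debug_label, capacity, entry_size).
  // Pools created via TYPED_POOL(T, Name) now pass "\"Name\"" here for free.
  void_pool pool = void_pool_create(&a, "particle pool", 64, sizeof(particle));

  u32 handle;
  particle* p = void_pool_alloc(&pool, &handle);  // handle indexes into the pool
  p->x = 1.0f;

  void_pool_dealloc(&pool, handle);
  return 0;
}
```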