author     Omniscient <17525998+omnisci3nce@users.noreply.github.com>  2024-05-03 21:37:51 +1000
committer  Omniscient <17525998+omnisci3nce@users.noreply.github.com>  2024-05-03 21:37:51 +1000
commit     aed7d1b7ac340c19656059c9cbd94aff40952f83 (patch)
tree       9629cfda30456200e2496d0ac706de0002927614 /src
parent     8407723dfc12d302f6b696f702a5ae9caaf219c1 (diff)
create logical device
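For orientation: the device-creation path this commit adds boils down to picking a physical device with a graphics-capable queue family, then creating a VkDevice with one graphics queue, anisotropic sampling enabled, and the swapchain extension. A condensed sketch of that flow follows; the helper name make_logical_device is illustrative only, and the real implementation is create_logical_device() in backend_vulkan.c further down.

    #include <vulkan/vulkan.h>

    // Condensed sketch of the flow added in this commit (see create_logical_device below);
    // the real code threads everything through the backend's vulkan_context.
    static VkDevice make_logical_device(VkPhysicalDevice phys, uint32_t graphics_family) {
      float prio = 1.0f;
      VkDeviceQueueCreateInfo queue_ci = { .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
                                           .queueFamilyIndex = graphics_family,
                                           .queueCount = 1,
                                           .pQueuePriorities = &prio };
      VkPhysicalDeviceFeatures features = { .samplerAnisotropy = VK_TRUE };
      const char* exts[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };
      VkDeviceCreateInfo device_ci = { .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
                                       .queueCreateInfoCount = 1,
                                       .pQueueCreateInfos = &queue_ci,
                                       .pEnabledFeatures = &features,
                                       .enabledExtensionCount = 1,
                                       .ppEnabledExtensionNames = exts };
      VkDevice device = VK_NULL_HANDLE;
      if (vkCreateDevice(phys, &device_ci, NULL, &device) != VK_SUCCESS) return VK_NULL_HANDLE;
      return device;
    }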
Diffstat (limited to 'src')
-rw-r--r--  src/defines.h                              4
-rw-r--r--  src/renderer/backends/backend_dx11.h       1
-rw-r--r--  src/renderer/backends/backend_vulkan.c   237
-rw-r--r--  src/renderer/backends/vulkan_helpers.h    42
-rw-r--r--  src/renderer/bind_group_layouts.h          4
-rw-r--r--  src/renderer/cleanroom/types.h             2
-rw-r--r--  src/renderer/immediate.h                   6
-rw-r--r--  src/renderer/ral.h                         7
-rw-r--r--  src/renderer/ral_types.h                  14
-rw-r--r--  src/renderer/render.h                      2
-rw-r--r--  src/renderer/render_types.h               16
-rw-r--r--  src/renderer/renderpasses.h                7
-rw-r--r--  src/scene.h                               10
-rw-r--r--  src/std/buf.h                              6
-rw-r--r--  src/std/mem.c                              9
-rw-r--r--  src/std/mem.h                              7
-rw-r--r--  src/systems/terrain.h                      2
17 files changed, 298 insertions, 78 deletions
diff --git a/src/defines.h b/src/defines.h
index ec526e0..4b6f8c7 100644
--- a/src/defines.h
+++ b/src/defines.h
@@ -66,8 +66,8 @@ Renderer backend defines:
 // Platform will inform renderer backend (unless user overrides)
 #if defined(CEL_PLATFORM_LINUX)
-#define CEL_REND_BACKEND_OPENGL 1
-// #define CEL_REND_BACKEND_VULKAN 1
+// #define CEL_REND_BACKEND_OPENGL 1
+#define CEL_REND_BACKEND_VULKAN 1
 #endif
 
 #if defined(CEL_PLATFORM_WINDOWS)
diff --git a/src/renderer/backends/backend_dx11.h b/src/renderer/backends/backend_dx11.h
index 8e3a513..53738aa 100644
--- a/src/renderer/backends/backend_dx11.h
+++ b/src/renderer/backends/backend_dx11.h
@@ -8,7 +8,6 @@
 // typedef struct gpu_swapchain gpu_swapchain;
 typedef struct gpu_device {
-  // VkPhysicalDevice physical_device;
   // VkDevice logical_device;
   // VkPhysicalDeviceProperties properties;
diff --git a/src/renderer/backends/backend_vulkan.c b/src/renderer/backends/backend_vulkan.c
index ae857a0..c21a6b9 100644
--- a/src/renderer/backends/backend_vulkan.c
+++ b/src/renderer/backends/backend_vulkan.c
@@ -5,6 +5,7 @@
 #include <vulkan/vulkan_core.h>
 
 #include "backend_vulkan.h"
+#include "maths_types.h"
 #include "mem.h"
 #include "vulkan_helpers.h"
@@ -31,6 +32,8 @@ typedef struct vulkan_context {
   u32 screen_width;
   u32 screen_height;
+
+  VkDebugUtilsMessengerEXT vk_debugger;
 } vulkan_context;
 
 static vulkan_context context;
@@ -39,6 +42,13 @@
 /** @brief Enumerates and selects the most appropriate graphics device */
 bool select_physical_device(gpu_device* out_device);
+
+bool is_physical_device_suitable(VkPhysicalDevice device);
+
+queue_family_indices find_queue_families(VkPhysicalDevice device);
+
+bool create_logical_device(gpu_device* out_device);
+
 /** @brief Helper function for creating array of all extensions we want */
 cstr_darray* get_all_extensions();
@@ -49,7 +59,7 @@ bool gpu_backend_init(const char* window_name, GLFWwindow* window) {
   // Create an allocator
   size_t temp_arena_size = 1024 * 1024;
-  arena_create(malloc(temp_arena_size), temp_arena_size);
+  context.temp_arena = arena_create(malloc(temp_arena_size), temp_arena_size);
 
   // Setup Vulkan instance
   VkApplicationInfo app_info = { VK_STRUCTURE_TYPE_APPLICATION_INFO };
@@ -63,20 +73,61 @@ bool gpu_backend_init(const char* window_name, GLFWwindow* window) {
   create_info.pApplicationInfo = &app_info;
 
   // Extensions
-  // FIXME: Use my own extension choices
-  // cstr_darray* required_extensions = cstr_darray_new(2);
+  cstr_darray* required_extensions = cstr_darray_new(2);
   // cstr_darray_push(required_extensions, VK_KHR_SURFACE_EXTENSION_NAME);
-  create_info.enabledExtensionCount = cstr_darray_len(required_extensions);
-  create_info.ppEnabledExtensionNames = required_extensions->data;
+
   uint32_t count;
   const char** extensions = glfwGetRequiredInstanceExtensions(&count);
-  create_info.enabledExtensionCount = count;
-  create_info.ppEnabledExtensionNames = extensions;
+  for (u32 i = 0; i < count; i++) {
+    cstr_darray_push(required_extensions, extensions[i]);
+  }
+
+  cstr_darray_push(required_extensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
+
+  DEBUG("Required extensions:");
+  for (u32 i = 0; i < cstr_darray_len(required_extensions); i++) {
+    DEBUG(" %s", required_extensions->data[i]);
+  }
+
+  create_info.enabledExtensionCount = cstr_darray_len(required_extensions);
+  create_info.ppEnabledExtensionNames = required_extensions->data;
 
   // TODO: Validation layers
   create_info.enabledLayerCount = 0;
   create_info.ppEnabledLayerNames = NULL;
+  INFO("Validation layers enabled");
+  cstr_darray* desired_validation_layers = cstr_darray_new(1);
+  cstr_darray_push(desired_validation_layers, "VK_LAYER_KHRONOS_validation");
+
+  u32 n_available_layers = 0;
+  VK_CHECK(vkEnumerateInstanceLayerProperties(&n_available_layers, 0));
+  TRACE("%d available layers", n_available_layers);
+  VkLayerProperties* available_layers =
+      arena_alloc(&context.temp_arena, n_available_layers * sizeof(VkLayerProperties));
+  VK_CHECK(vkEnumerateInstanceLayerProperties(&n_available_layers, available_layers));
+
+  for (int i = 0; i < cstr_darray_len(desired_validation_layers); i++) {
+    // look through layers to make sure we can find the ones we want
+    bool found = false;
+    for (int j = 0; j < n_available_layers; j++) {
+      if (str8_equals(str8_cstr_view(desired_validation_layers->data[i]),
+                      str8_cstr_view(available_layers[j].layerName))) {
+        found = true;
+        TRACE("Found layer %s", desired_validation_layers->data[i]);
+        break;
+      }
+    }
+
+    if (!found) {
+      FATAL("Required validation is missing %s", desired_validation_layers->data[i]);
+      return false;
+    }
+  }
+  INFO("All validation layers are present");
+  create_info.enabledLayerCount = cstr_darray_len(desired_validation_layers);
+  create_info.ppEnabledLayerNames = desired_validation_layers->data;
+
   VkResult result = vkCreateInstance(&create_info, NULL, &context.instance);
   if (result != VK_SUCCESS) {
     ERROR("vkCreateInstance failed with result: %u", result);
@@ -84,6 +135,25 @@ bool gpu_backend_init(const char* window_name, GLFWwindow* window) {
   }
   TRACE("Vulkan Instance created");
 
+  DEBUG("Creating Vulkan debugger");
+  u32 log_severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+                     VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT;
+  VkDebugUtilsMessengerCreateInfoEXT debug_create_info = {
+    VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT
+  };
+  debug_create_info.messageSeverity = log_severity;
+  debug_create_info.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+                                  VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT |
+                                  VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT;
+  debug_create_info.pfnUserCallback = vk_debug_callback;
+
+  PFN_vkCreateDebugUtilsMessengerEXT func =
+      (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(context.instance,
+                                                                "vkCreateDebugUtilsMessengerEXT");
+  assert(func);
+  VK_CHECK(func(context.instance, &debug_create_info, context.allocator, &context.vk_debugger));
+  DEBUG("Vulkan Debugger created");
+
   // Surface creation
   VkSurfaceKHR surface;
   VK_CHECK(glfwCreateWindowSurface(context.instance, window, NULL, &surface));
@@ -96,39 +166,43 @@ void gpu_backend_shutdown() { arena_free_storage(&context.temp_arena); }
 
 bool gpu_device_create(gpu_device* out_device) {
+  // First things first store this poitner from the renderer
+  context.device = out_device;
+
+  arena_save savept = arena_savepoint(&context.temp_arena);
   // Physical device
   if (!select_physical_device(out_device)) {
     return false;
   }
   TRACE("Physical device selected");
-  // Features
-  VkPhysicalDeviceFeatures device_features = { 0 };
-  device_features.samplerAnisotropy = VK_TRUE;  // request anistrophy
-
   // Logical device
-  VkDeviceQueueCreateInfo queue_create_info[2];
-  //..
-  VkDeviceCreateInfo device_create_info = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
-  device_create_info.queueCreateInfoCount = VULKAN_QUEUES_COUNT;
-  device_create_info.pQueueCreateInfos = queue_create_info;
-  device_create_info.pEnabledFeatures = &device_features;
-  device_create_info.enabledExtensionCount = 1;
-  const char* extension_names = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
-  device_create_info.ppEnabledExtensionNames = &extension_names;
-
-  VkResult result = vkCreateDevice(out_device->physical_device, &device_create_info,
-                                   context.allocator, &out_device->logical_device);
-  if (result != VK_SUCCESS) {
-    FATAL("Error creating logical device with status %u\n", result);
-    exit(1);
-  }
-  TRACE("Logical device created");
+  create_logical_device(out_device);
+  // VkDeviceQueueCreateInfo queue_create_info = {};
+
+  // queue_family_indices indices = find_queue_families(context.device->physical_device);
+  // //..
+  // VkDeviceCreateInfo device_create_info = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
+  // device_create_info.queueCreateInfoCount = VULKAN_QUEUES_COUNT;
+  // device_create_info.pQueueCreateInfos = queue_create_info;
+  // device_create_info.pEnabledFeatures = &device_features;
+  // device_create_info.enabledExtensionCount = 1;
+  // const char* extension_names = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+  // device_create_info.ppEnabledExtensionNames = &extension_names;
+
+  // VkResult result = vkCreateDevice(out_device->physical_device, &device_create_info,
+  //                                  context.allocator, &out_device->logical_device);
+  // if (result != VK_SUCCESS) {
+  //   FATAL("Error creating logical device with status %u\n", result);
+  //   exit(1);
+  // }
+  // TRACE("Logical device created");
 
   // Queues
   // Create the command pool
+  arena_rewind(savept);  // Free any temp data
   return true;
 }
 
@@ -316,4 +390,109 @@ inline void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count) {
   vkCmdDrawIndexed(encoder->cmd_buffer, index_count, 1, 0, 0, 0);
 }
 
-bool select_physical_device(gpu_device* out_device) {}
\ No newline at end of file
+bool select_physical_device(gpu_device* out_device) {
+  u32 physical_device_count = 0;
+  VK_CHECK(vkEnumeratePhysicalDevices(context.instance, &physical_device_count, 0));
+  if (physical_device_count == 0) {
+    FATAL("No devices that support vulkan were found");
+    return false;
+  }
+  TRACE("Number of devices found %d", physical_device_count);
+
+  VkPhysicalDevice* physical_devices =
+      arena_alloc(&context.temp_arena, physical_device_count * sizeof(VkPhysicalDevice));
+  VK_CHECK(vkEnumeratePhysicalDevices(context.instance, &physical_device_count, physical_devices));
+
+  bool found = false;
+  for (u32 device_i = 0; device_i < physical_device_count; device_i++) {
+    if (is_physical_device_suitable(physical_devices[device_i])) {
+      out_device->physical_device = physical_devices[device_i];
+      found = true;
+      break;
+    }
+  }
+
+  if (!found) {
+    FATAL("Couldn't find a suitable physical device");
+    return false;
+  }
+
+  return true;
+}
+
+bool is_physical_device_suitable(VkPhysicalDevice device) {
+  VkPhysicalDeviceProperties properties;
+  vkGetPhysicalDeviceProperties(device, &properties);
+
+  VkPhysicalDeviceFeatures features;
+  vkGetPhysicalDeviceFeatures(device, &features);
+
+  VkPhysicalDeviceMemoryProperties memory;
+  vkGetPhysicalDeviceMemoryProperties(device, &memory);
+
+  // TODO: Check against these device properties
+
+  queue_family_indices indices = find_queue_families(device);
+
+  return indices.has_graphics;
+}
+
+queue_family_indices find_queue_families(VkPhysicalDevice device) {
+  queue_family_indices indices = { 0 };
+
+  u32 queue_family_count = 0;
+  vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, 0);
+
+  VkQueueFamilyProperties* queue_families =
+      arena_alloc(&context.temp_arena, queue_family_count * sizeof(VkQueueFamilyProperties));
+  vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, queue_families);
+
+  for (u32 queue_i = 0; queue_i < queue_family_count; queue_i++) {
+    // Graphics queue
+    if (queue_families[queue_i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+      indices.graphics_queue_index = queue_i;
+      indices.has_graphics = true;
+    }
+  }
+
+  return indices;
+}
+
+bool create_logical_device(gpu_device* out_device) {
+  queue_family_indices indices = find_queue_families(out_device->physical_device);
+
+  f32 prio_one = 1.0;
+  VkDeviceQueueCreateInfo queue_create_info = { VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO };
+  queue_create_info.queueFamilyIndex = indices.graphics_queue_index;
+  queue_create_info.queueCount = 1;
+  queue_create_info.pQueuePriorities = &prio_one;
+  queue_create_info.flags = 0;
+  queue_create_info.pNext = 0;
+
+  // Features
+  VkPhysicalDeviceFeatures device_features = { 0 };
+  device_features.samplerAnisotropy = VK_TRUE;  // request anistrophy
+
+  // Device itself
+  VkDeviceCreateInfo device_create_info = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
+  device_create_info.queueCreateInfoCount = 1;
+  device_create_info.pQueueCreateInfos = &queue_create_info;
+  device_create_info.pEnabledFeatures = &device_features;
+  device_create_info.enabledExtensionCount = 1;
+  const char* extension_names = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+  device_create_info.ppEnabledExtensionNames = &extension_names;
+
+  // deprecated
+  device_create_info.enabledLayerCount = 0;
+  device_create_info.ppEnabledLayerNames = 0;
+
+  VkResult result = vkCreateDevice(context.device->physical_device, &device_create_info,
+                                   context.allocator, &context.device->logical_device);
+  if (result != VK_SUCCESS) {
+    printf("error creating logical device with status %u\n", result);
+    ERROR_EXIT("Unable to create vulkan logical device. Exiting..");
+  }
+  TRACE("Logical device created");
+
+  return true;
+}
\ No newline at end of file
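One gap worth noting for a follow-up: gpu_backend_shutdown() above still only frees the temp arena, so the vk_debugger messenger created in gpu_backend_init() is never destroyed. A possible teardown, not part of this commit (vulkan_debugger_destroy is a hypothetical helper name), would be:

    #include <vulkan/vulkan.h>

    // Hypothetical cleanup for the debug messenger created in gpu_backend_init().
    // Must be called before vkDestroyInstance(), with the same allocator used at creation.
    static void vulkan_debugger_destroy(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
                                        const VkAllocationCallbacks* allocator) {
      PFN_vkDestroyDebugUtilsMessengerEXT destroy_fn =
          (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
              instance, "vkDestroyDebugUtilsMessengerEXT");
      if (destroy_fn) {
        destroy_fn(instance, messenger, allocator);
      }
    }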
diff --git a/src/renderer/backends/vulkan_helpers.h b/src/renderer/backends/vulkan_helpers.h
index 4bd02f1..baff4e7 100644
--- a/src/renderer/backends/vulkan_helpers.h
+++ b/src/renderer/backends/vulkan_helpers.h
@@ -20,16 +20,27 @@ static void plat_get_required_extension_names(cstr_darray* extensions) {
 }
 
 // TODO(omni): port to using internal assert functions
-#define VK_CHECK(vulkan_expr)             \
-  do {                                    \
-    VkResult res = vulkan_expr;           \
-    if (res != VK_SUCCESS) {              \
+#define VK_CHECK(vulkan_expr)               \
+  do {                                      \
+    VkResult res = vulkan_expr;             \
+    if (res != VK_SUCCESS) {                \
       ERROR_EXIT("Vulkan error: %u", res); \
-    }                                     \
+    }                                       \
   } while (0)
 
 // TODO: typedef struct vk_debugger {} vk_debugger;
 
+typedef struct queue_family_indices {
+  u32 graphics_queue_index;
+  u32 present_queue_index;
+  u32 compute_queue_index;
+  u32 transfer_queue_index;
+  bool has_graphics;
+  bool has_present;
+  bool has_compute;
+  bool has_transfer;
+} queue_family_indices;
+
 typedef struct vulkan_physical_device_requirements {
   bool graphics;
   bool present;
@@ -168,4 +179,25 @@ static bool physical_device_meets_requirements(
   }
 
   return false;
+}
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vk_debug_callback(
+    VkDebugUtilsMessageSeverityFlagBitsEXT severity, VkDebugUtilsMessageTypeFlagsEXT flags,
+    const VkDebugUtilsMessengerCallbackDataEXT* callback_data, void* user_data) {
+  switch (severity) {
+    default:
+    case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+      ERROR("%s", callback_data->pMessage);
+      break;
+    case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+      WARN("%s", callback_data->pMessage);
+      break;
+    case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+      INFO("%s", callback_data->pMessage);
+      break;
+    case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT:
+      TRACE("%s", callback_data->pMessage);
+      break;
+  }
+  return VK_FALSE;
+}
\ No newline at end of file
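The new queue_family_indices struct reserves present/compute/transfer slots, but find_queue_families() in backend_vulkan.c above only fills the graphics entry, so has_present is never set. If a dedicated present queue is needed later, one way to populate it is vkGetPhysicalDeviceSurfaceSupportKHR; a sketch, assuming the VkSurfaceKHR created in gpu_backend_init() is passed through (fill_present_queue is a hypothetical helper):

    #include <vulkan/vulkan.h>

    // Hypothetical helper: fills the present-queue fields of queue_family_indices
    // (declared in vulkan_helpers.h above) for a given surface.
    static void fill_present_queue(VkPhysicalDevice device, VkSurfaceKHR surface,
                                   uint32_t queue_family_count, queue_family_indices* indices) {
      for (uint32_t i = 0; i < queue_family_count; i++) {
        VkBool32 supports_present = VK_FALSE;
        vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &supports_present);
        if (supports_present) {
          indices->present_queue_index = i;
          indices->has_present = true;
          break;
        }
      }
    }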
diff --git a/src/renderer/bind_group_layouts.h b/src/renderer/bind_group_layouts.h
index d163fab..d2571ef 100644
--- a/src/renderer/bind_group_layouts.h
+++ b/src/renderer/bind_group_layouts.h
@@ -4,9 +4,9 @@
  * @brief Common bindgroups (descriptor set layouts)
  * @version 0.1
  * @date 2024-04-28
- *
+ *
  * @copyright Copyright (c) 2024
- *
+ *
  */
 #pragma once
 #include "defines.h"
diff --git a/src/renderer/cleanroom/types.h b/src/renderer/cleanroom/types.h
index 7360ebe..6686be5 100644
--- a/src/renderer/cleanroom/types.h
+++ b/src/renderer/cleanroom/types.h
@@ -2,5 +2,5 @@
 #include "darray.h"
 #include "defines.h"
 #include "maths_types.h"
-#include "str.h"
 #include "render_types.h"
+#include "str.h"
diff --git a/src/renderer/immediate.h b/src/renderer/immediate.h
index 6d93c53..b9d7c61 100644
--- a/src/renderer/immediate.h
+++ b/src/renderer/immediate.h
@@ -14,7 +14,7 @@ void imm_draw_camera_frustum();
 // const char* model_filepath);  // tracks internally whether the model is loaded
 
 // static void imm_draw_model(const char* model_filepath) {
-  // check that model is loaded
-  // if not loaded, load model and upload to gpu - LRU cache for models
-  // else submit draw call
+// check that model is loaded
+// if not loaded, load model and upload to gpu - LRU cache for models
+// else submit draw call
 // }
\ No newline at end of file
diff --git a/src/renderer/ral.h b/src/renderer/ral.h
index 8e49dbe..7c143f2 100644
--- a/src/renderer/ral.h
+++ b/src/renderer/ral.h
@@ -11,10 +11,10 @@
  */
 #pragma once
-#include "ral_types.h"
+#include "buf.h"
 #include "defines.h"
+#include "ral_types.h"
 #include "str.h"
-#include "buf.h"
 
 // Unrelated forward declares
 typedef struct arena arena;
@@ -60,7 +60,6 @@ struct graphics_pipeline_desc {
 };
 
 typedef struct gpu_renderpass_desc {
-
 } gpu_renderpass_desc;
 
 // --- Lifecycle functions
@@ -97,7 +96,7 @@ void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset,
 void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline);
 void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
 void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
-void encode_set_bind_group(); // TODO
+void encode_set_bind_group();  // TODO
 void encode_draw(gpu_cmd_encoder* encoder);
 void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count);
diff --git a/src/renderer/ral_types.h b/src/renderer/ral_types.h
index d6c5865..ae54b53 100644
--- a/src/renderer/ral_types.h
+++ b/src/renderer/ral_types.h
@@ -1,18 +1,18 @@
 /**
  * @file ral_types.h
  * @author your name (you@domain.com)
- * @brief
+ * @brief
  * @version 0.1
  * @date 2024-04-27
- *
+ *
  * @copyright Copyright (c) 2024
- *
+ *
  */
 #pragma once
+#include "darray.h"
 #include "defines.h"
 #include "maths_types.h"
-#include "darray.h"
 
 #ifndef RENDERER_TYPED_HANDLES
 CORE_DEFINE_HANDLE(buffer_handle);
@@ -99,11 +99,7 @@ KITC_DECL_TYPED_ARRAY(u32)
 #define TYPED_VERTEX_ARRAY
 #endif
 
-typedef enum gpu_cull_mode {
-  CULL_BACK_FACE,
-  CULL_FRONT_FACE,
-  CULL_COUNT
-} gpu_cull_mode;
+typedef enum gpu_cull_mode { CULL_BACK_FACE, CULL_FRONT_FACE, CULL_COUNT } gpu_cull_mode;
 
 // ? How to tie together materials and shaders
diff --git a/src/renderer/render.h b/src/renderer/render.h
index a9370e0..e6dd8b8 100644
--- a/src/renderer/render.h
+++ b/src/renderer/render.h
@@ -10,8 +10,8 @@
  */
 #pragma once
-#include "render_types.h"
 #include "ral_types.h"
+#include "render_types.h"
 
 bool renderer_init(renderer* ren);
 void renderer_shutdown(renderer* ren);
diff --git a/src/renderer/render_types.h b/src/renderer/render_types.h
index 4866ef4..a5c0c1a 100644
--- a/src/renderer/render_types.h
+++ b/src/renderer/render_types.h
@@ -1,21 +1,21 @@
 /**
  * @file render_types.h
  * @author your name (you@domain.com)
- * @brief
+ * @brief
  * @version 0.1
  * @date 2024-04-27
- *
+ *
  * @copyright Copyright (c) 2024
- *
+ *
  */
 #pragma once
-#include "ral_types.h"
 #include "ral.h"
+#include "ral_types.h"
 
 #if defined(CEL_PLATFORM_WINDOWS)
 // #include "backend_dx11.h"
-#include "backend_vulkan.h"
 #endif
+#include "backend_vulkan.h"
 
 struct GLFWwindow;
@@ -37,7 +37,7 @@ typedef struct renderer {
 typedef struct geometry_data {
   vertex_format format;
-  vertex_darray* vertices; // TODO: make it not a pointer
+  vertex_darray* vertices;  // TODO: make it not a pointer
   bool has_indices;
   u32_darray indices;
   vec3 colour; /** Optional: set vertex colours */
@@ -66,8 +66,8 @@ typedef struct model {
 typedef struct texture {
   u32 texture_id;
   char name[256];
-  void *image_data;
-  void *backend_data;
+  void* image_data;
+  void* backend_data;
   u32 width;
   u32 height;
   u8 channel_count;
diff --git a/src/renderer/renderpasses.h b/src/renderer/renderpasses.h
index 67badaa..91970d8 100644
--- a/src/renderer/renderpasses.h
+++ b/src/renderer/renderpasses.h
@@ -4,9 +4,9 @@
  * @brief Built-in renderpasses to the engine
  * @version 0.1
  * @date 2024-04-28
- *
+ *
  * @copyright Copyright (c) 2024
- *
+ *
  */
 #pragma once
 #include "maths_types.h"
@@ -25,7 +25,8 @@ typedef struct render_entity {
 // Don't need to pass in *anything*.
 gpu_renderpass* renderpass_blinn_phong_create();
-void renderpass_blinn_phong_execute(gpu_renderpass* pass, render_entity* entities, size_t entity_count);
+void renderpass_blinn_phong_execute(gpu_renderpass* pass, render_entity* entities,
+                                    size_t entity_count);
 
 gpu_renderpass* renderpass_shadows_create();
 void renderpass_shadows_execute(gpu_renderpass* pass, render_entity* entities, size_t entity_count);
\ No newline at end of file
diff --git a/src/scene.h b/src/scene.h
index 2cc4d8a..6cac061 100644
--- a/src/scene.h
+++ b/src/scene.h
@@ -1,12 +1,12 @@
 /**
  * @file scene.h
  * @author your name (you@domain.com)
- * @brief
+ * @brief
  * @version 0.1
  * @date 2024-04-27
- *
+ *
  * @copyright Copyright (c) 2024
- *
+ *
  */
 #include "defines.h"
 #include "types.h"
@@ -24,7 +24,7 @@ bool scene_add_point_light(scene* s /* TODO */);
 bool scene_add_heightmap(scene* s /* TODO */);
 bool scene_delete_heightmap(scene* s);
 
-bool scene_add_model(scene *s, model_handle model);
-void scene_remove_model(scene *s, model_handle model);
+bool scene_add_model(scene* s, model_handle model);
+void scene_remove_model(scene* s, model_handle model);
 
 // TODO: functions to load and save scenes from disk
\ No newline at end of file
diff --git a/src/std/buf.h b/src/std/buf.h
index b0f8b85..de093ec 100644
--- a/src/std/buf.h
+++ b/src/std/buf.h
@@ -1,12 +1,12 @@
 /**
  * @file buf.h
  * @author your name (you@domain.com)
- * @brief
+ *  @brief
  * @version 0.1
  * @date 2024-04-28
- *
+ *
  * @copyright Copyright (c) 2024
- *
+ *
  */
 #pragma once
 #include "defines.h"
diff --git a/src/std/mem.c b/src/std/mem.c
index 5468898..4886d72 100644
--- a/src/std/mem.c
+++ b/src/std/mem.c
@@ -31,4 +31,11 @@ void arena_free_all(arena* a) {
   a->curr = a->begin;  // pop everything at once and reset to the start.
 }
 
-void arena_free_storage(arena* a) { free(a->begin); }
\ No newline at end of file
+void arena_free_storage(arena* a) { free(a->begin); }
+
+arena_save arena_savepoint(arena* a) {
+  arena_save savept = { .arena = a, .savepoint = a->curr };
+  return savept;
+}
+
+void arena_rewind(arena_save savepoint) { savepoint.arena->curr = savepoint.savepoint; }
\ No newline at end of file
diff --git a/src/std/mem.h b/src/std/mem.h
index 2f92894..bbfb852 100644
--- a/src/std/mem.h
+++ b/src/std/mem.h
@@ -18,9 +18,16 @@ typedef struct arena {
   char* end;
 } arena;
 
+typedef struct arena_save {
+  arena* arena;
+  char* savepoint;
+} arena_save;
+
 arena arena_create(void* backing_buffer, size_t capacity);
 void* arena_alloc(arena* a, size_t size);
 void* arena_alloc_align(arena* a, size_t size, size_t align);
 void arena_free_all(arena* a);
 void arena_free_storage(arena* a);
+arena_save arena_savepoint(arena* a);
+void arena_rewind(arena_save savepoint);
 // TODO: arena_resize
\ No newline at end of file
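The savepoint API added above is what gpu_device_create() uses to reclaim its scratch allocations in one step. The general pattern, sketched against the signatures in mem.h (scratch_example is illustrative only):

    #include <stddef.h>
    #include "mem.h"  // arena, arena_save, arena_alloc, arena_savepoint, arena_rewind

    // Everything allocated after arena_savepoint() is reclaimed by arena_rewind().
    void scratch_example(arena* temp, size_t n) {
      arena_save savept = arena_savepoint(temp);

      int* scratch = arena_alloc(temp, n * sizeof(int));  // temporary working memory
      (void)scratch;  // ... use scratch here ...

      arena_rewind(savept);  // temp is back exactly where it was at the savepoint
    }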
diff --git a/src/systems/terrain.h b/src/systems/terrain.h
index 3d6f1c1..a8bff17 100644
--- a/src/systems/terrain.h
+++ b/src/systems/terrain.h
@@ -29,7 +29,7 @@ typedef struct heightmap {
 
 typedef struct terrain_state {
   arena terrain_allocator;
-  heightmap* heightmap; // NULL = no heightmap
+  heightmap* heightmap;  // NULL = no heightmap
 } terrain_state;
 
 bool terrain_system_init(terrain_state* state);