/**
* @file ral.h
* @author your name (you@domain.com)
* @brief Render Abstraction Layer
* @details API that a graphics backend *must* implement
* @version 0.1
* @date 2024-03-31
*
* @copyright Copyright (c) 2024
*
*/
#pragma once
#include "buf.h"
#include "defines.h"
#include "ral_types.h"
#include "str.h"
// Unrelated forward declares
typedef struct arena arena;
struct GLFWwindow;
// Forward declare structs
typedef struct gpu_swapchain gpu_swapchain;
typedef struct gpu_device gpu_device;
typedef struct gpu_pipeline_layout gpu_pipeline_layout;
typedef struct gpu_pipeline gpu_pipeline;
typedef struct gpu_renderpass gpu_renderpass;
typedef struct gpu_cmd_encoder gpu_cmd_encoder; // Recording
typedef struct gpu_cmd_buffer gpu_cmd_buffer; // Ready for submission
typedef struct gpu_buffer gpu_buffer;
#define MAX_SHADER_DATA_LAYOUTS 5
/** @brief A bind group: a set of shader resources bound together (not yet implemented) */
// typedef struct gpu_bind_group
// Pools
typedef struct gpu_backend_pools {
// pools for each gpu structure
} gpu_backend_pools;
typedef enum pipeline_kind {
PIPELINE_GRAPHICS,
PIPELINE_COMPUTE,
} pipeline_kind;
typedef struct shader_desc {
const char* debug_name;
str8 filepath; // where it came from
str8 code;     // Either GLSL source or SPIR-V bytecode
bool is_spirv;
} shader_desc;
struct graphics_pipeline_desc {
const char* debug_name;
shader_desc vs; /**< @brief Vertex shader stage */
shader_desc fs; /**< @brief Fragment shader stage */
/* shader_data_layout data_layouts[MAX_SHADER_DATA_LAYOUTS]; */
/* u32 data_layouts_count; */
// Each entry is roughly equivalent to a descriptor set layout; each layout can have
// multiple bindings. Examples:
//   - a uniform buffer representing the view-projection matrix
//   - a texture for a shadow map
shader_data data_layouts[MAX_SHADER_DATA_LAYOUTS];
u32 data_layouts_count;
// gpu_pipeline_layout* layout;
gpu_renderpass* renderpass;
bool wireframe;
bool depth_test;
};
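/**
 * Example (illustrative sketch, not part of the API): filling out a graphics_pipeline_desc
 * and creating a pipeline. `pass` is assumed to come from gpu_renderpass_create, and the
 * str8_lit helper and `vs_source`/`fs_source` blobs are assumptions about str.h and the
 * caller's shader loading, not guaranteed by this header.
 * @code
 * struct graphics_pipeline_desc desc = {
 *   .debug_name = "Triangle Pipeline",
 *   .vs = { .debug_name = "Triangle Vertex Shader",
 *           .filepath = str8_lit("shaders/triangle.vert"),
 *           .code = vs_source,  // loaded GLSL source or SPIR-V blob
 *           .is_spirv = false },
 *   .fs = { .debug_name = "Triangle Fragment Shader",
 *           .filepath = str8_lit("shaders/triangle.frag"),
 *           .code = fs_source,
 *           .is_spirv = false },
 *   .renderpass = pass,
 *   .wireframe = false,
 *   .depth_test = true,
 * };
 * gpu_pipeline* pipeline = gpu_graphics_pipeline_create(desc);
 * @endcode
 */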
typedef struct gpu_renderpass_desc {
} gpu_renderpass_desc;
// --- Lifecycle functions
bool gpu_backend_init(const char* window_name, struct GLFWwindow* window);
void gpu_backend_shutdown(void);
// TEMP
bool gpu_backend_begin_frame(void);
void gpu_backend_end_frame(void);
bool gpu_device_create(gpu_device* out_device);
void gpu_device_destroy(void);
gpu_renderpass* gpu_renderpass_create(const gpu_renderpass_desc* description);
void gpu_renderpass_destroy(gpu_renderpass* pass);
gpu_pipeline* gpu_graphics_pipeline_create(struct graphics_pipeline_desc description);
void gpu_pipeline_destroy(gpu_pipeline* pipeline);
bool gpu_swapchain_create(gpu_swapchain* out_swapchain);
void gpu_swapchain_destroy(gpu_swapchain* swapchain);
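/**
 * Example (illustrative sketch): typical bring-up and frame loop. The GLFW window is
 * assumed to have been created with glfwCreateWindow; `should_quit` is a placeholder
 * for the caller's own loop condition.
 * @code
 * gpu_device device;
 * gpu_swapchain swapchain;
 * if (!gpu_backend_init("demo", window)) return false;
 * gpu_device_create(&device);
 * gpu_swapchain_create(&swapchain);
 * while (!should_quit) {
 *   if (!gpu_backend_begin_frame()) continue;  // e.g. swapchain needs recreation
 *   // ... encode commands and submit (see gpu_cmd_encoder_* below) ...
 *   gpu_backend_end_frame();
 * }
 * gpu_swapchain_destroy(&swapchain);
 * gpu_device_destroy();
 * gpu_backend_shutdown();
 * @endcode
 */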
gpu_cmd_encoder gpu_cmd_encoder_create(void);
void gpu_cmd_encoder_destroy(gpu_cmd_encoder* encoder);
void gpu_cmd_encoder_begin(gpu_cmd_encoder encoder);
void gpu_cmd_encoder_begin_render(gpu_cmd_encoder* encoder, gpu_renderpass* renderpass);
void gpu_cmd_encoder_end_render(gpu_cmd_encoder* encoder);
void gpu_cmd_encoder_begin_compute(void);
gpu_cmd_encoder* gpu_get_default_cmd_encoder(void);
/* Actual commands that we can encode */
void encode_buffer_copy(gpu_cmd_encoder* encoder, buffer_handle src, u64 src_offset,
buffer_handle dst, u64 dst_offset, u64 copy_size);
void encode_clear_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
void encode_set_pipeline(gpu_cmd_encoder* encoder, gpu_pipeline* pipeline);
/** @brief Upload CPU-side data, as an array of bytes, to a GPU buffer */
void buffer_upload_bytes(buffer_handle gpu_buf, bytebuffer cpu_buf, u64 offset, u64 size);
// render pass
void encode_bind_pipeline(gpu_cmd_encoder* encoder, pipeline_kind kind, gpu_pipeline* pipeline);
void encode_bind_shader_data(gpu_cmd_encoder* encoder, u32 group, shader_data* data);
void encode_set_default_settings(gpu_cmd_encoder* encoder);
void encode_set_vertex_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
void encode_set_index_buffer(gpu_cmd_encoder* encoder, buffer_handle buf);
void encode_set_bind_group(); // TODO
void encode_draw(gpu_cmd_encoder* encoder);
void encode_draw_indexed(gpu_cmd_encoder* encoder, u64 index_count);
// FUTURE: compute passes
/** @brief Finish recording and return a command buffer that can be submitted to a queue */
gpu_cmd_buffer gpu_cmd_encoder_finish(gpu_cmd_encoder* encoder);
void gpu_queue_submit(gpu_cmd_buffer* buffer);
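/**
 * Example (illustrative sketch): recording one renderpass and submitting it. The
 * `renderpass`, `pipeline`, `vertex_buf`, `index_buf`, and `n_indices` values are
 * assumed to have been created/loaded elsewhere.
 * @code
 * gpu_cmd_encoder* enc = gpu_get_default_cmd_encoder();
 * gpu_cmd_encoder_begin(*enc);
 * gpu_cmd_encoder_begin_render(enc, renderpass);
 * encode_bind_pipeline(enc, PIPELINE_GRAPHICS, pipeline);
 * encode_set_default_settings(enc);
 * encode_set_vertex_buffer(enc, vertex_buf);
 * encode_set_index_buffer(enc, index_buf);
 * encode_draw_indexed(enc, n_indices);
 * gpu_cmd_encoder_end_render(enc);
 * gpu_cmd_buffer cmd = gpu_cmd_encoder_finish(enc);
 * gpu_queue_submit(&cmd);
 * @endcode
 */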
// Buffers
buffer_handle gpu_buffer_create(u64 size, gpu_buffer_type buf_type, gpu_buffer_flags flags,
const void* data);
void gpu_buffer_destroy(buffer_handle buffer);
void gpu_buffer_upload();
void gpu_buffer_bind(buffer_handle buffer);
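/**
 * Example (illustrative sketch): uploading vertex data through a staging buffer. The
 * BUFFER_* enumerator names below are hypothetical stand-ins for whatever ral_types.h
 * actually defines for gpu_buffer_type and gpu_buffer_flags.
 * @code
 * float verts[] = { -0.5f, -0.5f, 0.5f, -0.5f, 0.0f, 0.5f };
 * buffer_handle staging = gpu_buffer_create(sizeof(verts), BUFFER_DEFAULT,
 *                                           BUFFER_FLAG_CPU, verts);
 * buffer_handle vertex_buf = gpu_buffer_create(sizeof(verts), BUFFER_VERTEX,
 *                                              BUFFER_FLAG_GPU, NULL);
 * gpu_cmd_encoder* enc = gpu_get_default_cmd_encoder();
 * encode_buffer_copy(enc, staging, 0, vertex_buf, 0, sizeof(verts));
 * @endcode
 */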
// Textures
void gpu_texture_create();
void gpu_texture_destroy();
void gpu_texture_upload();
// Samplers
void gpu_sampler_create();
// --- Vertex formats
bytebuffer vertices_as_bytebuffer(arena* a, vertex_format format, vertex_darray* vertices);
// TODO: Bindgroup texture samplers / shader resources
// TEMP
void gpu_temp_draw(size_t n_verts);