path: root/video.cpp
author    quou <quou@disroot.org>  2024-12-19 19:20:50 +1100
committer quou <quou@disroot.org>  2024-12-19 19:22:20 +1100
commit    bec7a1c90f7cda0a9e7d2e2628ac69b645108dc4 (patch)
tree      830660aab41d90c24302d2c64e096292b62a4394 /video.cpp
parent    73744341846d4e76d6910dc5a15dff6d74586e39 (diff)
basic vulkan setup
Diffstat (limited to 'video.cpp')
-rw-r--r--  video.cpp  1238
1 file changed, 1238 insertions, 0 deletions
diff --git a/video.cpp b/video.cpp
new file mode 100644
index 0000000..c274e65
--- /dev/null
+++ b/video.cpp
@@ -0,0 +1,1238 @@
+#include "app.hpp"
+#include "video.hpp"
+
+#define device_heap_size (1024 * 1024 * 4)
+#define max_textures 1024
+
+extern "C" {
+#include "memory.h"
+#include "plat.h"
+#include "str.h"
+#include "sc/sh_enums.h"
+}
+
+#include <algorithm>
+#include <new>
+#include <tuple>
+#include <unordered_map>
+
+#define VK_USE_PLATFORM_XLIB_KHR
+#define GLAD_VULKAN_IMPLEMENTATION
+#include "glad_vk.h"
+
+const char* device_exts[] = {
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME
+};
+
+extern "C" {
+VkSurfaceKHR app_create_vk_surface(App* app, VkInstance inst);
+void app_destroy_vk_surface(
+ App* app,
+ VkInstance inst,
+ VkSurfaceKHR surf
+);
+}
+
+template <typename ID, typename T, int size>
+struct ID_Map
+{
+ T storage[size];
+ ID keys[size];
+
+ void init() {
+ int i;
+ for (i = 0; i < size; i++) {
+ keys[i] = 0;
+ }
+ }
+
+ std::pair<T*, uint32_t> bucket(ID id) {
+ int index = (int)id % size, i;
+ for (i = 0; i < size; i++) {
+ ID key = keys[index];
+ if (!key || key == id) return { &storage[index], index };
+ index = (index + 1) % size;
+ }
+ return { 0, 0 };
+ }
+
+ T& set(ID id, const T& v) {
+ auto [b, index] = bucket(id);
+ assert(b != 0);
+ assert(!keys[index]);
+ keys[index] = id;
+ *b = v;
+ return *b;
+ }
+
+ T& operator[](ID id) {
+ T* b = bucket(id).first;
+ assert(b != 0);
+ return *b;
+ }
+};
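+/*
+ * ID_Map is a fixed-capacity hash table using open addressing with
+ * linear probing. An ID of 0 is the "empty slot" sentinel, so valid
+ * IDs must start at 1 (texture_count below starts at 1 for this
+ * reason). A minimal usage sketch, with hypothetical values:
+ *
+ *   ID_Map<Texture_Id, Texture_Vk, max_textures> map;
+ *   map.init();
+ *   map.set(1, tex);        // insert; asserts if 1 is already present
+ *   Texture_Vk& t = map[1]; // lookup; an absent key yields an
+ *                           // (uninitialized) empty slot, unchecked
+ */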
+
+static void* vk_alloc(
+ void* uptr,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope scope
+) {
+ Device* d = (Device*)uptr;
+ void* r;
+ (void)scope;
+ if (!size) return 0;
+ r = heap_alloc_aligned(
+ d->heap,
+ size,
+ alignment
+ );
+ if (!r) {
+ print_err("Out of memory.");
+ pbreak(4096);
+ }
+ return r;
+}
+
+static void vk_free(
+ void* uptr,
+ void* ptr
+) {
+ Device* d = (Device*)uptr;
+ /* the callback contract allows pfnFree to be called with NULL */
+ if (!ptr) return;
+ heap_free(d->heap, ptr);
+}
+
+static void* vk_realloc(
+ void* uptr,
+ void* old,
+ size_t size,
+ size_t alignment,
+ VkSystemAllocationScope scope
+) {
+ int os;
+ void* na;
+ (void)scope;
+ if (!old)
+ return vk_alloc(uptr, size, alignment, scope);
+ if (!size) {
+ vk_free(uptr, old);
+ return 0;
+ }
+ os = heap_block_size(old);
+ na = vk_alloc(uptr, size, alignment, scope);
+ memcpy(na, old, std::min(os, (int)size));
+ vk_free(uptr, old);
+ return na;
+}
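+/*
+ * Together these three functions satisfy the VkAllocationCallbacks
+ * contract as I read it: allocations honour the requested alignment
+ * (via heap_alloc_aligned), a zero-size request yields NULL, and
+ * vk_free tolerates NULL. The allocation scope is deliberately
+ * ignored; everything comes from the one device heap.
+ */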
+
+typedef struct {
+ VkSurfaceCapabilitiesKHR cap;
+ unsigned fmt_count, pm_count;
+ VkSurfaceFormatKHR* fmts;
+ VkPresentModeKHR* pms;
+} Swap_Cap;
+
+static void get_swap_cap(
+ Device* d,
+ VkPhysicalDevice dev,
+ VkSurfaceKHR surf,
+ Swap_Cap* cap
+) {
+ cap->fmts = 0;
+ cap->pms = 0;
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+ dev,
+ surf,
+ &cap->cap
+ );
+ vkGetPhysicalDeviceSurfaceFormatsKHR(
+ dev,
+ surf,
+ &cap->fmt_count,
+ 0
+ );
+ if (cap->fmt_count) {
+ cap->fmts = (VkSurfaceFormatKHR*)heap_alloc(
+ d->heap,
+ sizeof *cap->fmts * cap->fmt_count
+ );
+ vkGetPhysicalDeviceSurfaceFormatsKHR(
+ dev,
+ surf,
+ &cap->fmt_count,
+ cap->fmts
+ );
+ }
+ vkGetPhysicalDeviceSurfacePresentModesKHR(
+ dev,
+ surf,
+ &cap->pm_count,
+ 0
+ );
+ if (cap->pm_count) {
+ cap->pms = (VkPresentModeKHR*)heap_alloc(
+ d->heap,
+ sizeof *cap->pms * cap->pm_count
+ );
+ vkGetPhysicalDeviceSurfacePresentModesKHR(
+ dev,
+ surf,
+ &cap->pm_count,
+ cap->pms
+ );
+ }
+}
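+/*
+ * This follows the standard Vulkan enumerate-twice pattern: call once
+ * with a null array to get the element count, allocate, then call
+ * again to fill the array. The same pattern recurs below for queue
+ * families, device extensions, and physical devices.
+ */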
+
+static void deinit_swap_cap(
+ Device* d,
+ Swap_Cap* cap
+) {
+ if (cap->fmts) heap_free(d->heap, cap->fmts);
+ if (cap->pms) heap_free(d->heap, cap->pms);
+}
+
+struct Device_Vk;
+struct Swapchain {
+ VkSwapchainKHR swapchain;
+ Texture_Id* textures;
+ VkSurfaceFormatKHR format;
+ VkExtent2D size;
+ VkPresentModeKHR mode;
+ int image_count;
+
+ void init(const App& app, Device_Vk* dev);
+ void initr(const App& app, Device_Vk* dev);
+ void recreate(const App& app, Device_Vk* dev);
+ void get_images(Device_Vk* dev);
+ void destroy(Device_Vk* dev);
+};
+
+#define max_contexts 16
+
+enum {
+ context_state_avail = 1 << 0,
+ context_state_init = 1 << 1
+};
+
+struct Shader_Vk : public Shader {
+ SProgram_Type type;
+ VkShaderModule modules[shader_type_count];
+
+ bool init(Device_Vk* dev, FILE* f);
+ bool init_module(
+ Device_Vk* dev,
+ int stage,
+ char* buf,
+ int size
+ );
+ void destroy_internal(Device_Vk* dev);
+};
+
+struct Context_Vk : public Context {
+ int state;
+ VkCommandBuffer cb;
+ VkCommandPool pool;
+ VkFence fence;
+ VkSemaphore semaphore;
+
+ void init_pool(Device_Vk* dev);
+ void init_cb(Device_Vk* dev);
+ void init_sync(Device_Vk* dev);
+ void init(Device_Vk* dev);
+ void begin_record(Device_Vk* dev);
+ Context_Vk& acquire(Device_Vk* dev);
+ void release();
+};
+
+struct Texture_Vk : public Texture {
+ VkImage image;
+ VkImageView view;
+};
+
+struct Renderpass_Vk {
+ VkRenderPass rpo;
+ VkFramebuffer fbo;
+ VkClearValue clear;
+ int age;
+
+ void on_submit() {
+ age = 0;
+ }
+
+ void init_rp(Device_Vk* dev, const Render_Pass& rp);
+ void init_fb(Device_Vk* dev, const Render_Pass& rp);
+ void destroy(Device_Vk* dev);
+};
+
+template<>
+struct std::hash<Render_Pass>
+{
+ size_t operator()(const Render_Pass& rp) const {
+ return fnv1a64((uint8_t*)&rp, sizeof rp);
+ }
+};
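+/*
+ * Hashing a Render_Pass by its raw bytes assumes the struct contains
+ * no indirection and that any padding bytes are zeroed, otherwise two
+ * equal passes could hash differently. std::unordered_map also needs
+ * operator== on Render_Pass, which is assumed to be defined alongside
+ * the type in video.hpp.
+ */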
+
+struct Device_Vk : public Device {
+ VkAllocationCallbacks ac;
+ VkInstance inst;
+ VkDevice dev;
+ VkPhysicalDevice phys_dev;
+ VkSurfaceKHR surf;
+ uint32_t backbuffer_index;
+ Texture_Id backbuffer_id;
+ Swap_Cap swap_cap;
+ int queue_index;
+ VkQueue queue;
+ Swapchain swapchain;
+ Context_Vk contexts[max_contexts];
+ Context_Vk* current_ctx;
+#ifdef DEBUG
+ VkDebugUtilsMessengerEXT msg;
+#endif
+
+ ID_Map<Texture_Id, Texture_Vk, max_textures> textures;
+ Texture_Id texture_count;
+
+ std::unordered_map<Render_Pass, Renderpass_Vk> rpo_cache;
+
+ Texture_Id alloc_texture(
+ VkImage img,
+ VkImageView view,
+ const Texture& copy
+ );
+
+ void init_internal();
+ void deinit_internal();
+ void init_ac();
+ void create_inst(const char** exts, int count);
+ void create_dev(Swap_Cap* swap_cap);
+ void find_exts(const char** exts, int& count);
+
+ bool has_validation();
+ void init_validation();
+
+ void create_surf();
+ void on_resize_internal(int w, int h);
+
+ Renderpass_Vk& create_rpo(const Render_Pass& rp);
+ Renderpass_Vk& get_rpo(const Render_Pass& rp);
+
+ void collect_garbage();
+};
+
+#ifdef DEBUG
+static VkBool32 debug_callback(
+ VkDebugUtilsMessageSeverityFlagBitsEXT sev,
+ VkDebugUtilsMessageTypeFlagsEXT type,
+ const VkDebugUtilsMessengerCallbackDataEXT* data,
+ void* uptr
+) {
+ (void)uptr;
+ if (sev <= VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT)
+ return 0;
+ switch (sev) {
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT:
+ print("%s\n", data->pMessage);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT:
+ print_war("%s\n", data->pMessage);
+ break;
+ case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT:
+ print_err("%s\n", data->pMessage);
+ break;
+ default: break;
+ }
+ pbreak((int)type);
+ return 0;
+}
+
+static VkResult create_dmesg(
+ Device_Vk* d,
+ const VkDebugUtilsMessengerCreateInfoEXT* information,
+ const VkAllocationCallbacks* allocator,
+ VkDebugUtilsMessengerEXT* messenger
+) {
+ PFN_vkCreateDebugUtilsMessengerEXT f;
+ f = (PFN_vkCreateDebugUtilsMessengerEXT)
+ vkGetInstanceProcAddr(
+ d->inst,
+ "vkCreateDebugUtilsMessengerEXT"
+ );
+ return f?
+ f(d->inst, information, allocator, messenger):
+ VK_ERROR_EXTENSION_NOT_PRESENT;
+}
+
+static void destroy_dmesg(
+ VkInstance instance,
+ VkDebugUtilsMessengerEXT messenger,
+ const VkAllocationCallbacks* allocator
+) {
+ PFN_vkDestroyDebugUtilsMessengerEXT f;
+ f = (PFN_vkDestroyDebugUtilsMessengerEXT)
+ vkGetInstanceProcAddr(
+ instance,
+ "vkDestroyDebugUtilsMessengerEXT"
+ );
+ if (f)
+ f(instance, messenger, allocator);
+}
+
+void Device_Vk::init_validation() {
+ VkDebugUtilsMessengerCreateInfoEXT mi{};
+ VkResult r;
+ mi.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
+ mi.messageSeverity =
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
+ mi.messageType =
+ VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
+ mi.pfnUserCallback = debug_callback;
+ r = create_dmesg(
+ this,
+ &mi,
+ &ac,
+ &msg
+ );
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create debug messenger.\n");
+ pbreak(r);
+ }
+}
+
+#endif
+
+
+bool Device_Vk::has_validation() {
+ unsigned count, i;
+ int f;
+ VkLayerProperties* props;
+ VkResult r;
+ r = vkEnumerateInstanceLayerProperties(&count, 0);
+ if (!count || r != VK_SUCCESS) return 0;
+ props = (VkLayerProperties*)heap_alloc(heap, count * sizeof *props);
+ vkEnumerateInstanceLayerProperties(&count, props);
+ for (f = 0, i = 0; i < count; i++) {
+ /* strcmp returns 0 on a match, so negate it */
+ if (!strcmp(
+ props[i].layerName,
+ "VK_LAYER_KHRONOS_validation"
+ )) {
+ f = 1;
+ break;
+ }
+ }
+ heap_free(heap, props);
+ return f;
+}
+
+void Device_Vk::find_exts(const char** exts, int& count) {
+ app->get_vk_exts(exts, count);
+#ifdef DEBUG
+ exts[count++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
+#endif
+}
+
+void Device_Vk::init_ac() {
+ ac.pUserData = this;
+ ac.pfnAllocation = vk_alloc;
+ ac.pfnReallocation = vk_realloc;
+ ac.pfnFree = vk_free;
+ ac.pfnInternalAllocation = 0;
+ ac.pfnInternalFree = 0;
+}
+
+void Device_Vk::create_inst(const char** exts, int ext_count) {
+ VkInstanceCreateInfo ci{};
+ VkApplicationInfo ai{};
+ VkResult r;
+#ifdef DEBUG
+ const char* vln = "VK_LAYER_KHRONOS_validation";
+#endif
+ ai.apiVersion = VK_API_VERSION_1_0;
+ ai.pApplicationName = "C2";
+ ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ ci.pApplicationInfo = &ai;
+ ci.enabledExtensionCount = (unsigned)ext_count;
+ ci.ppEnabledExtensionNames = exts;
+#ifdef DEBUG
+ ci.enabledLayerCount = has_validation();
+ ci.ppEnabledLayerNames = &vln;
+ if (!ci.enabledLayerCount)
+ print_war("No validation layers.");
+#endif
+ r = vkCreateInstance(&ci, &ac, &inst);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create a Vulkan instance\n");
+ pbreak(r);
+ }
+}
+
+static int proc_swap(
+ Device_Vk* d,
+ VkPhysicalDevice dev,
+ Swap_Cap* sc
+) {
+ get_swap_cap(d, dev, d->surf, sc);
+ return sc->fmt_count > 0 && sc->pm_count > 0;
+}
+
+static int proc_qf(Device_Vk* d, VkPhysicalDevice dev) {
+ unsigned fc, i;
+ int r = 0;
+ VkBool32 press;
+ VkQueueFamilyProperties* fs, * p;
+ vkGetPhysicalDeviceQueueFamilyProperties(
+ dev,
+ &fc,
+ 0
+ );
+ fs = (VkQueueFamilyProperties*)heap_alloc(d->heap, (int)fc * sizeof *fs);
+ vkGetPhysicalDeviceQueueFamilyProperties(
+ dev,
+ &fc,
+ fs
+ );
+ for (i = 0; i < fc; i++) {
+ p = &fs[i];
+ vkGetPhysicalDeviceSurfaceSupportKHR(
+ dev,
+ i,
+ d->surf,
+ &press
+ );
+ if (
+ p->queueFlags & VK_QUEUE_GRAPHICS_BIT &&
+ press
+ ) {
+ d->queue_index = (int)i;
+ r = 1;
+ goto fin;
+ }
+ }
+fin:
+ heap_free(d->heap, fs);
+ return r;
+}
+
+static int sup_exts(Device_Vk* d, VkPhysicalDevice dev) {
+ int r = 0, i, f;
+ unsigned c, j;
+ int extc = sizeof device_exts / sizeof *device_exts;
+ VkExtensionProperties* avail;
+ vkEnumerateDeviceExtensionProperties(dev, 0, &c, 0);
+ avail = (VkExtensionProperties*)heap_alloc(d->heap, c * sizeof *avail);
+ vkEnumerateDeviceExtensionProperties(
+ dev,
+ 0,
+ &c,
+ avail
+ );
+ for (i = 0; i < extc; i++) {
+ f = 0;
+ for (j = 0; j < c; j++) {
+ if (!strcmp(device_exts[i], avail[j].extensionName)) {
+ f = 1;
+ break;
+ }
+ }
+ if (!f) goto fin;
+ }
+ r = 1;
+fin:
+ heap_free(d->heap, avail);
+ return r;
+}
+
+static VkPhysicalDevice get_phys_dev(Device_Vk* d, Swap_Cap* sc) {
+ unsigned dc, i;
+ VkPhysicalDevice* devs, dev;
+ vkEnumeratePhysicalDevices(d->inst, &dc, 0);
+ if (!dc) {
+ print_err(
+ "Couldn't find any vulkan-capable graphics hardware.\n"
+ );
+ pbreak(400);
+ }
+ devs = (VkPhysicalDevice*)heap_alloc(d->heap, (int)dc * sizeof *devs);
+ vkEnumeratePhysicalDevices(d->inst, &dc, devs);
+ for (i = 0; i < dc; i++) {
+ dev = devs[i];
+ if (
+ proc_swap(d, dev, sc) &&
+ proc_qf(d, dev) &&
+ sup_exts(d, dev)
+ ) {
+ heap_free(d->heap, devs);
+ return dev;
+ }
+ deinit_swap_cap(d, sc);
+ }
+ print_err("Couldn't find a suitable GPU.\n");
+ pbreak(401);
+ heap_free(d->heap, devs);
+ return 0;
+}
+
+void Device_Vk::create_dev(Swap_Cap* swap_cap) {
+ const float priority = 0.0f;
+ VkDeviceQueueCreateInfo qi{};
+ VkDeviceCreateInfo di{};
+ VkPhysicalDeviceFeatures pdf{};
+ VkResult r;
+ phys_dev = get_phys_dev(this, swap_cap);
+ qi.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ qi.queueFamilyIndex = queue_index;
+ qi.queueCount = 1;
+ qi.pQueuePriorities = &priority;
+ di.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+ di.pQueueCreateInfos = &qi;
+ di.queueCreateInfoCount = 1;
+ di.pEnabledFeatures = &pdf;
+ di.enabledExtensionCount =
+ sizeof device_exts / sizeof *device_exts;
+ di.ppEnabledExtensionNames = device_exts;
+ di.pNext = 0;
+ r = vkCreateDevice(
+ phys_dev,
+ &di,
+ &ac,
+ &dev
+ );
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create a Vulkan device.\n");
+ pbreak(r);
+ }
+}
+
+void Device_Vk::init_internal() {
+ const char* exts[16];
+ int ext_count = 0, i;
+ gladLoaderLoadVulkan(0, 0, 0);
+ textures.init();
+ texture_count = 1;
+ find_exts(exts, ext_count);
+ init_ac();
+ create_inst(exts, ext_count);
+#ifdef DEBUG
+ if (has_validation())
+ init_validation();
+#endif
+ surf = app_create_vk_surface(app, inst);
+ create_dev(&swap_cap);
+ gladLoaderLoadVulkan(inst, phys_dev, dev);
+ vkGetDeviceQueue(dev, (uint32_t)queue_index, 0, &queue);
+ swapchain.init(*app, this);
+ for (i = 0; i < max_contexts; i++)
+ contexts[i].state = context_state_avail;
+}
+
+void Device_Vk::deinit_internal() {
+ swapchain.destroy(this);
+ deinit_swap_cap(this, &swap_cap);
+ app_destroy_vk_surface(app, inst, surf);
+ vkDestroyDevice(dev, &ac);
+#ifdef DEBUG
+ destroy_dmesg(
+ inst,
+ msg,
+ &ac
+ );
+#endif
+ vkDestroyInstance(inst, &ac);
+}
+
+void Device_Vk::on_resize_internal(int w, int h) {
+ (void)w;
+ (void)h;
+ vkDeviceWaitIdle(dev);
+ deinit_swap_cap(this, &swap_cap);
+ get_swap_cap(this, phys_dev, surf, &swap_cap);
+ swapchain.recreate(*app, this);
+}
+
+Renderpass_Vk& Device_Vk::create_rpo(const Render_Pass& rp) {
+ VkClearValue clear{};
+ clear.color.float32[0] = (float)rp.clear.r / 255.0f;
+ clear.color.float32[1] = (float)rp.clear.g / 255.0f;
+ clear.color.float32[2] = (float)rp.clear.b / 255.0f;
+ clear.color.float32[3] = (float)rp.clear.a / 255.0f;
+ Renderpass_Vk rpo;
+ rpo.init_rp(this, rp);
+ rpo.init_fb(this, rp);
+ rpo.age = 0;
+ rpo.clear = clear;
+ rpo_cache[rp] = rpo;
+ return rpo_cache[rp];
+}
+
+Renderpass_Vk& Device_Vk::get_rpo(const Render_Pass& rp) {
+ auto rpo_index = rpo_cache.find(rp);
+ if (rpo_index == rpo_cache.end())
+ return create_rpo(rp);
+ return rpo_index->second;
+}
+
+void Renderpass_Vk::destroy(Device_Vk* dev) {
+ vkDestroyRenderPass(dev->dev, rpo, &dev->ac);
+ vkDestroyFramebuffer(dev->dev, fbo, &dev->ac);
+}
+
+void Device_Vk::collect_garbage() {
+ for (auto i = rpo_cache.begin(); i != rpo_cache.end();) {
+ auto& rp = i->second;
+ rp.age++;
+ if (rp.age > 3) {
+ rp.destroy(this);
+ i = rpo_cache.erase(i);
+ } else ++i;
+ }
+}
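+/*
+ * Render pass objects are cached by Render_Pass value and evicted
+ * after going three frames without a submit: on_submit resets age,
+ * and collect_garbage (called from begin_frame) increments it.
+ * Destroying them this eagerly is only safe because submit() below
+ * waits on the context fence, so nothing that references an RPO is
+ * still in flight when its age exceeds 3.
+ */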
+
+void Renderpass_Vk::init_rp(
+ Device_Vk* dev,
+ const Render_Pass& rp
+) {
+ VkRenderPassCreateInfo ri{};
+ VkAttachmentDescription ad{};
+ VkAttachmentReference ar{};
+ VkSubpassDescription sd{};
+ VkResult r;
+
+ ad.format = dev->swapchain.format.format;
+ ad.samples = VK_SAMPLE_COUNT_1_BIT;
+ ad.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
+ ad.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ ad.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ ad.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ ad.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ ad.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+
+ ar.attachment = 0;
+ ar.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+
+ sd.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ sd.colorAttachmentCount = 1;
+ sd.pColorAttachments = &ar;
+
+ ri.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ ri.attachmentCount = 1;
+ ri.pAttachments = &ad;
+ ri.subpassCount = 1;
+ ri.pSubpasses = &sd;
+
+ r = vkCreateRenderPass(dev->dev, &ri, &dev->ac, &rpo);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create a render pass\n");
+ pbreak(r);
+ }
+}
+
+void Renderpass_Vk::init_fb(
+ Device_Vk* dev,
+ const Render_Pass& rp
+) {
+ const Texture_Vk& texture =
+ *(const Texture_Vk*)&dev->get_texture(rp.target);
+ VkResult r;
+ VkFramebufferCreateInfo fbi{};
+ fbi.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ fbi.renderPass = rpo;
+ fbi.width = texture.w;
+ fbi.height = texture.h;
+ fbi.layers = 1;
+ fbi.attachmentCount = 1;
+ fbi.pAttachments = &texture.view;
+ r = vkCreateFramebuffer(dev->dev, &fbi, &dev->ac, &fbo);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create a framebuffer.\n");
+ pbreak(r);
+ }
+}
+
+static int get_image_count(const Swap_Cap& s) {
+ const VkSurfaceCapabilitiesKHR& cap = s.cap;
+ /* maxImageCount == 0 means the surface imposes no upper limit */
+ int extra = cap.maxImageCount == 0 || cap.minImageCount < cap.maxImageCount;
+ return (int)cap.minImageCount + extra;
+}
+
+static VkExtent2D choose_swap_extent(const App& app, const VkSurfaceCapabilitiesKHR& cap) {
+ VkExtent2D r = { (uint32_t)app.w, (uint32_t)app.h };
+ r.width = std::min(r.width, cap.maxImageExtent.width);
+ r.height = std::min(r.height, cap.maxImageExtent.height);
+ r.width = std::max(r.width, cap.minImageExtent.width);
+ r.height = std::max(r.height, cap.minImageExtent.height);
+ return r;
+}
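+/*
+ * Note: when cap.currentExtent.width is not 0xFFFFFFFF the spec
+ * requires the swapchain extent to equal currentExtent exactly; the
+ * clamp above only matches that because min/maxImageExtent bracket
+ * currentExtent on the platform this targets (Xlib).
+ */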
+
+static VkSurfaceFormatKHR choose_swap_format(const Swap_Cap& cap) {
+ unsigned i;
+ for (i = 0; i < cap.fmt_count; i++) {
+ const auto& fmt = cap.fmts[i];
+ if (
+ fmt.format == VK_FORMAT_B8G8R8A8_SRGB &&
+ fmt.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR
+ ) return fmt;
+ }
+ print_err("Failed to find a surface that supports VK_FORMAT_B8G8R8A8_SRGB.\n");
+ return cap.fmts[0];
+}
+
+static VkPresentModeKHR choose_swap_mode(const Swap_Cap& cap, bool vsync) {
+ (void)vsync;
+ (void)cap; /* todo */
+ return VK_PRESENT_MODE_FIFO_KHR;
+}
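+/*
+ * FIFO is the only present mode the spec guarantees to be available,
+ * and it implies vsync. A later version could honour the vsync flag
+ * by scanning cap.pms for VK_PRESENT_MODE_MAILBOX_KHR, which is
+ * widely supported but not guaranteed.
+ */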
+
+static VkImageView make_view(
+ Device_Vk* dev,
+ VkImage image,
+ VkFormat fmt,
+ VkImageAspectFlagBits flags
+) {
+ VkImageViewCreateInfo vi{};
+ VkResult r;
+ VkImageView view;
+ vi.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ vi.image = image;
+ vi.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ vi.format = fmt;
+ vi.subresourceRange.aspectMask = flags;
+ vi.subresourceRange.baseMipLevel = 0;
+ vi.subresourceRange.levelCount = 1;
+ vi.subresourceRange.baseArrayLayer = 0;
+ vi.subresourceRange.layerCount = 1;
+ r = vkCreateImageView(dev->dev, &vi, &dev->ac, &view);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to make image view.\n");
+ pbreak((int)r);
+ }
+ return view;
+}
+
+void Swapchain::init(const App& app, Device_Vk* dev) {
+ swapchain = VK_NULL_HANDLE;
+ textures = 0;
+ initr(app, dev);
+}
+
+void Swapchain::initr(const App& app, Device_Vk* dev) {
+ image_count = get_image_count(dev->swap_cap);
+ size = choose_swap_extent(app, dev->swap_cap.cap);
+ format = choose_swap_format(dev->swap_cap);
+ mode = choose_swap_mode(dev->swap_cap, false);
+ {
+ VkResult r;
+ VkSwapchainCreateInfoKHR si{};
+ si.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
+ si.surface = dev->surf;
+ si.minImageCount = image_count;
+ si.imageFormat = format.format;
+ si.imageColorSpace = format.colorSpace;
+ si.imageExtent = size;
+ si.imageArrayLayers = 1;
+ si.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ si.preTransform = dev->swap_cap.cap.currentTransform;
+ si.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ si.presentMode = mode;
+ si.clipped = VK_TRUE;
+ si.oldSwapchain = swapchain;
+ si.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ r = vkCreateSwapchainKHR(dev->dev, &si, &dev->ac, &swapchain);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create swapchain.\n");
+ pbreak(r);
+ }
+ }
+ textures = (Texture_Id*)heap_alloc(dev->heap, sizeof *textures * image_count);
+ get_images(dev);
+}
+
+void Swapchain::recreate(const App& app, Device_Vk* dev) {
+ Swapchain old = *this;
+ vkDeviceWaitIdle(dev->dev);
+ initr(app, dev);
+ old.destroy(dev);
+}
+
+void Swapchain::get_images(Device_Vk* dev) {
+ /* count is read as the array capacity when images is non-null */
+ unsigned count = (unsigned)image_count;
+ int i;
+ VkImage* images = (VkImage*)heap_alloc(
+ dev->heap,
+ sizeof *images * image_count
+ );
+ Texture info{};
+ info.w = size.width;
+ info.h = size.height;
+ info.alias = true;
+ vkGetSwapchainImagesKHR(dev->dev, swapchain, &count, images);
+ assert(count == (unsigned)image_count);
+ for (i = 0; i < image_count; i++) {
+ VkImageView view = make_view(dev,
+ images[i],
+ format.format,
+ VK_IMAGE_ASPECT_COLOR_BIT
+ );
+ textures[i] = dev->alloc_texture(images[i], view, info);
+ }
+ heap_free(dev->heap, images);
+}
+
+void Swapchain::destroy(Device_Vk* dev) {
+ int i;
+ for (i = 0; i < image_count; i++)
+ dev->destroy_texture(textures[i]);
+ vkDestroySwapchainKHR(dev->dev, swapchain, &dev->ac);
+ heap_free(dev->heap, textures);
+ textures = 0;
+}
+
+Device* Device::create(Arena* a, App* ap) {
+ Device_Vk* d = (Device_Vk*)arena_alloc(a, sizeof *d);
+ new(d) Device_Vk();
+ d->init(a, ap);
+ return d;
+}
+
+void Device::init(Arena* a, App* ap) {
+ void* hm;
+ arena = a;
+ app = ap;
+ hm = arena_alloc(a, device_heap_size);
+ heap = (Heap*)arena_alloc(a, sizeof *heap);
+ init_heap(heap, hm, device_heap_size);
+ ((Device_Vk*)this)->init_internal();
+}
+
+void Device::destroy() {
+ ((Device_Vk*)this)->deinit_internal();
+}
+
+void Device::on_resize() {
+ ((Device_Vk*)this)->on_resize_internal(app->w, app->h);
+}
+
+void Device::begin_frame() {
+ Device_Vk* dev = (Device_Vk*)this;
+ dev->collect_garbage();
+ dev->current_ctx = (Context_Vk*)&acquire();
+ vkAcquireNextImageKHR(
+ dev->dev,
+ dev->swapchain.swapchain,
+ UINT64_MAX,
+ dev->current_ctx->semaphore,
+ VK_NULL_HANDLE,
+ &dev->backbuffer_index
+ );
+ dev->backbuffer_id = dev->swapchain.textures[dev->backbuffer_index];
+}
+
+void Device::submit(Context& ctx_) {
+ Context_Vk* ctx = (Context_Vk*)&ctx_;
+ Device_Vk* dev = (Device_Vk*)this;
+ VkPipelineStageFlags stage =
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ VkSubmitInfo si{};
+ si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ si.waitSemaphoreCount = 1;
+ si.pWaitSemaphores = &ctx->semaphore;
+ si.pWaitDstStageMask = &stage;
+ si.signalSemaphoreCount = 1;
+ si.pSignalSemaphores = &ctx->semaphore;
+ si.commandBufferCount = 1;
+ si.pCommandBuffers = &ctx->cb;
+ vkEndCommandBuffer(ctx->cb);
+ vkQueueSubmit(dev->queue, 1, &si, ctx->fence);
+ ctx->wait(*dev);
+ ctx->release();
+}
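+/*
+ * Submission is fully synchronous: the one semaphore is signalled by
+ * vkAcquireNextImageKHR, waited on and re-signalled by this submit,
+ * then waited on by vkQueuePresentKHR, and the fence wait blocks the
+ * CPU until the GPU finishes. Simple and correct for a single frame
+ * in flight, at the cost of serialising CPU and GPU work.
+ */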
+
+void Device::present() {
+ Device_Vk* dev = (Device_Vk*)this;
+ Context_Vk* ctx = dev->current_ctx;
+ VkPresentInfoKHR pi{};
+ VkSemaphore s[1];
+ VkSwapchainKHR sw[1];
+ submit(*ctx);
+ s[0] = ctx->semaphore;
+ sw[0] = dev->swapchain.swapchain;
+ pi.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
+ pi.waitSemaphoreCount = 1;
+ pi.pWaitSemaphores = s;
+ pi.swapchainCount = 1;
+ pi.pSwapchains = sw;
+ pi.pImageIndices = &dev->backbuffer_index;
+ vkQueuePresentKHR(dev->queue, &pi);
+
+/* print("%d\n", dev->heap->blocks);
+ print("%d\n", dev->rpo_cache.size());
+ heap_defrag(dev->heap);*/
+}
+
+Texture_Id Device::get_backbuffer() {
+ return ((Device_Vk*)this)->backbuffer_id;
+}
+
+Texture& Device::get_texture(Texture_Id id) {
+ return ((Device_Vk*)this)->textures[id];
+}
+
+Texture_Id Device_Vk::alloc_texture(
+ VkImage img,
+ VkImageView view,
+ const Texture& copy
+) {
+ Texture_Id id = texture_count++;
+ Texture_Vk tex;
+ assert(id < max_textures);
+ memcpy(&tex, &copy, sizeof(Texture));
+ tex.image = img;
+ tex.view = view;
+ textures.set(id, tex);
+ return id;
+}
+
+void Device::destroy_texture(Texture_Id id) {
+ Device_Vk* dev = (Device_Vk*)this;
+ Texture_Vk& tex = dev->textures[id];
+ if (!tex.alias)
+ vkDestroyImage(dev->dev, tex.image, &dev->ac);
+ vkDestroyImageView(dev->dev, tex.view, &dev->ac);
+}
+
+void Context::wait(Device& d) {
+ Context_Vk* ctx = (Context_Vk*)this;
+ Device_Vk* dev = (Device_Vk*)&d;
+ vkWaitForFences(
+ dev->dev,
+ 1,
+ &ctx->fence,
+ VK_TRUE,
+ UINT64_MAX
+ );
+}
+
+void Context::submit(
+ Device& d,
+ const Draw& draw,
+ const Pipeline& p,
+ const Render_Pass& rp
+) {
+ Device_Vk* dev = (Device_Vk*)&d;
+ (void)draw;
+ (void)p;
+ (void)rp;
+ (void)dev;
+ assert(0);
+ /* todo */
+}
+
+void Context::submit(
+ Device& d,
+ const Draw* draws,
+ int count,
+ const Pipeline& p,
+ const Render_Pass& rp
+) {
+ Device_Vk* dev = (Device_Vk*)&d;
+ (void)draws;
+ (void)count;
+ (void)p;
+ (void)rp;
+ (void)dev;
+ assert(0);
+ /* todo */
+}
+
+void Context::submit(Device& d, const Render_Pass& rp) {
+ Device_Vk* dev = (Device_Vk*)&d;
+ Context_Vk* ctx = (Context_Vk*)this;
+ Renderpass_Vk& rpo = dev->get_rpo(rp);
+ VkRenderPassBeginInfo rpbi{};
+ rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ rpbi.renderPass = rpo.rpo;
+ rpbi.framebuffer = rpo.fbo;
+ rpbi.renderArea.extent = dev->swapchain.size;
+ rpbi.clearValueCount = 1;
+ rpbi.pClearValues = &rpo.clear;
+ vkCmdBeginRenderPass(
+ ctx->cb,
+ &rpbi,
+ VK_SUBPASS_CONTENTS_INLINE
+ );
+ vkCmdEndRenderPass(ctx->cb);
+ rpo.on_submit();
+}
+
+void Context_Vk::init_pool(Device_Vk* dev) {
+ VkCommandPoolCreateInfo pi{};
+ VkResult r;
+ pi.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ pi.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ pi.queueFamilyIndex = (uint32_t)dev->queue_index;
+ r = vkCreateCommandPool(dev->dev, &pi, &dev->ac, &pool);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create a command pool.\n");
+ pbreak(r);
+ }
+}
+
+void Context_Vk::init_cb(Device_Vk* dev) {
+ VkCommandBufferAllocateInfo ci{};
+ VkResult r;
+ ci.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ ci.commandPool = pool;
+ ci.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ ci.commandBufferCount = 1;
+ r = vkAllocateCommandBuffers(dev->dev, &ci, &cb);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to allocate a command buffer.\n");
+ pbreak(r);
+ }
+}
+
+void Context_Vk::init_sync(Device_Vk* dev) {
+ VkFenceCreateInfo fi{};
+ VkSemaphoreCreateInfo si{};
+ VkResult r;
+ fi.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ fi.flags = VK_FENCE_CREATE_SIGNALED_BIT;
+ r = vkCreateFence(dev->dev, &fi, &dev->ac, &fence);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create a fence.\n");
+ pbreak(r);
+ }
+ si.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
+ r = vkCreateSemaphore(dev->dev, &si, &dev->ac, &semaphore);
+ if (r != VK_SUCCESS) {
+ print_err("Failed to create a semaphore.\n");
+ pbreak(r);
+ }
+}
+
+void Context_Vk::init(Device_Vk* dev) {
+ init_pool(dev);
+ init_cb(dev);
+ init_sync(dev);
+ state |= context_state_init;
+}
+
+void Context_Vk::begin_record(Device_Vk* dev) {
+ VkCommandBufferBeginInfo bi{};
+ bi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ vkResetFences(dev->dev, 1, &fence);
+ vkResetCommandBuffer(cb, 0);
+ vkBeginCommandBuffer(cb, &bi);
+}
+
+Context_Vk& Context_Vk::acquire(Device_Vk* dev) {
+ if (~state & context_state_init)
+ init(dev);
+ state &= ~context_state_avail;
+ begin_record(dev);
+ return *this;
+}
+
+void Context_Vk::release() {
+ state |= context_state_avail;
+}
+
+Context& Device::acquire() {
+ Device_Vk* vk = (Device_Vk*)this;
+ int i;
+ for (i = 0; i < max_contexts; i++) {
+ if (vk->contexts[i].state & context_state_avail)
+ return vk->contexts[i].acquire(vk);
+ }
+ print_err("Too many active contexts!\n");
+ print("Probably a submit was missed.\n");
+ pbreak(10000);
+ return vk->contexts[0];
+}
+
+Context& Device::get_ctx() {
+ Device_Vk* vk = (Device_Vk*)this;
+ return *vk->current_ctx;
+}
+
+/* todo proper asset manager which will load this stuff */
+bool Shader_Vk::init(Device_Vk* dev, FILE* f) {
+ char magic[4];
+ int attr_count, target_count, i;
+ fread(magic, 4, 1, f);
+ if (
+ magic[0] != 'C' ||
+ magic[1] != 'S' ||
+ magic[2] != 'H' ||
+ magic[3] != '2'
+ ) return false;
+ fread(&type, 4, 1, f);
+ fread(&attr_count, 4, 1, f);
+ fread(&target_count, 4, 1, f);
+ fseek(f, 32 * attr_count + 32 * target_count, SEEK_CUR);
+ for (i = 0; i < shader_type_count; i++) {
+ int o, s;
+ fread(&o, 4, 1, f);
+ fread(&s, 4, 1, f);
+ if (o) {
+ bool r;
+ int before = ftell(f);
+ char* buf = (char*)heap_alloc(dev->heap, s);
+ fseek(f, o, SEEK_SET);
+ fread(buf, 1, s, f);
+ r = init_module(dev, i, buf, s);
+ heap_free(dev->heap, buf);
+ fseek(f, before, SEEK_SET);
+ if (!r) return false;
+ } else {
+ modules[i] = VK_NULL_HANDLE;
+ }
+ }
+ return true;
+}
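+/*
+ * The reads above imply this layout for a CSH2 shader blob (all
+ * integer fields 32-bit, host byte order; inferred from the loader,
+ * not from a spec):
+ *
+ *   char magic[4];                   "CSH2"
+ *   int type;                        SProgram_Type
+ *   int attr_count, target_count;
+ *   byte attrs[32 * attr_count];     skipped here
+ *   byte targets[32 * target_count]; skipped here
+ *   struct { int offset, size; } stages[shader_type_count];
+ *
+ * A zero offset means the stage is absent and its module is left as
+ * VK_NULL_HANDLE.
+ */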
+
+bool Shader_Vk::init_module(
+ Device_Vk* dev,
+ int stage,
+ char* buf,
+ int size
+) {
+ VkResult r;
+ VkShaderModule m;
+ VkShaderModuleCreateInfo mi{};
+ mi.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ mi.codeSize = size;
+ mi.pCode = (uint32_t*)buf;
+ r = vkCreateShaderModule(dev->dev, &mi, &dev->ac, &m);
+ modules[stage] = m;
+ return r == VK_SUCCESS;
+}
+
+void Shader_Vk::destroy_internal(Device_Vk* dev) {
+ int i;
+ for (i = 0; i < shader_type_count; i++)
+ if (modules[i])
+ vkDestroyShaderModule(dev->dev, modules[i], &dev->ac);
+}
+
+void Shader::destroy(Device* dev) {
+ ((Shader_Vk*)this)->destroy_internal((Device_Vk*)dev);
+}
+
+Shader* Device::load_shader(const char* fname) {
+ FILE* f = fopen(fname, "rb");
+ Shader_Vk* s;
+ bool r;
+ if (!f) return 0;
+ s = (Shader_Vk*)heap_alloc(heap, sizeof *s);
+ r = s->init((Device_Vk*)this, f);
+ fclose(f);
+ if (!r) {
+ heap_free(heap, s);
+ return 0;
+ }
+ return s;
+}