diff --git a/add_shader.cmake b/add_shader.cmake index e4738ca..d9c2ccb 100644 --- a/add_shader.cmake +++ b/add_shader.cmake @@ -33,4 +33,10 @@ function(add_shader TARGET SHADER) # Make sure our build depends on this output. set_source_files_properties(${current-output-path} PROPERTIES GENERATED TRUE) target_sources(${TARGET} PRIVATE ${current-output-path}) -endfunction(add_shader) \ No newline at end of file +endfunction(add_shader) + +function(add_shaders TARGET SHADERS) + foreach(shader IN LISTS SHADERS) + add_shader(${TARGET} ${shader}) + endforeach() +endfunction(add_shaders) \ No newline at end of file diff --git a/aster/buffer.cpp b/aster/buffer.cpp index 92746c3..2085cc5 100644 --- a/aster/buffer.cpp +++ b/aster/buffer.cpp @@ -95,17 +95,51 @@ UniformBuffer::Init(const Device *device, const usize size, const cstr name) void StorageBuffer::Init(const Device *device, usize size, bool hostVisible, cstr name) { + Init(device, size, hostVisible, false, name); +} + +void +StorageBuffer::Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name) +{ + vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer; + if (deviceAddress) + { + usage |= vk::BufferUsageFlagBits::eShaderDeviceAddress; + } if (hostVisible) { - Allocate(device, size, vk::BufferUsageFlagBits::eStorageBuffer, + Allocate(device, size, usage, VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT, VMA_MEMORY_USAGE_AUTO, name); } else { - Allocate(device, size, vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eTransferDst, - 0, VMA_MEMORY_USAGE_AUTO, name); + usage |= vk::BufferUsageFlagBits::eTransferDst; + Allocate(device, size, usage, 0, + VMA_MEMORY_USAGE_AUTO, name); + } +} + +void +StorageIndexBuffer::Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name) +{ + vk::BufferUsageFlags usage = 
vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer; + if (deviceAddress) + { + usage |= vk::BufferUsageFlagBits::eShaderDeviceAddress; + } + if (hostVisible) + { + Allocate(device, size, usage, + VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT, + VMA_MEMORY_USAGE_AUTO, name); + } + else + { + usage |= vk::BufferUsageFlagBits::eTransferDst; + Allocate(device, size, usage, 0, VMA_MEMORY_USAGE_AUTO, name); } } diff --git a/aster/buffer.h b/aster/buffer.h index 4bb7917..cee1edb 100644 --- a/aster/buffer.h +++ b/aster/buffer.h @@ -9,6 +9,8 @@ struct Device; +// TODO Refactor the Buffer Hierarchy + struct Buffer { vk::Buffer m_Buffer = nullptr; @@ -49,6 +51,12 @@ struct UniformBuffer : Buffer struct StorageBuffer : Buffer { void Init(const Device *device, usize size, bool hostVisible, cstr name = nullptr); + void Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name = nullptr); +}; + +struct StorageIndexBuffer : StorageBuffer +{ + void Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name = nullptr); }; struct VertexBuffer : Buffer diff --git a/aster/constants.h b/aster/constants.h index f6525b2..563bd8f 100644 --- a/aster/constants.h +++ b/aster/constants.h @@ -11,7 +11,7 @@ #include #include -#include +#include using c8 = char; using u8 = uint8_t; diff --git a/aster/device.cpp b/aster/device.cpp index 8d67dba..e6157f0 100644 --- a/aster/device.cpp +++ b/aster/device.cpp @@ -81,6 +81,7 @@ Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features }; const VmaAllocatorCreateInfo allocatorCreateInfo = { + .flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT, .physicalDevice = m_PhysicalDevice, .device = m_Device, .pVulkanFunctions = &vmaVulkanFunctions, diff --git a/samples/01_triangle/CMakeLists.txt b/samples/01_triangle/CMakeLists.txt index 
9aa4b20..9a0b007 100644 --- a/samples/01_triangle/CMakeLists.txt +++ b/samples/01_triangle/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.13) -add_executable(triangle "triangle.cpp") +add_executable(triangle triangle.cpp) add_shader(triangle shader/triangle.vert.glsl) add_shader(triangle shader/triangle.frag.glsl) diff --git a/samples/03_model_render/model/AlphaBlendModeTest.glb b/samples/03_model_render/model/AlphaBlendModeTest.glb new file mode 100644 index 0000000..3c7e82a --- /dev/null +++ b/samples/03_model_render/model/AlphaBlendModeTest.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:688786ffa64990071cfd93c96f29917fddd3d254d3f0e48039d80d3b5ac0d8c7 +size 3017136 diff --git a/samples/03_model_render/model_render.cpp b/samples/03_model_render/model_render.cpp index 7b6fd1a..2296562 100644 --- a/samples/03_model_render/model_render.cpp +++ b/samples/03_model_render/model_render.cpp @@ -584,8 +584,6 @@ main(int, char **) cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1, &resourceManager.m_DescriptorSet, 0, nullptr); - cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 1, 1, &perFrameDescriptor, 0, - nullptr); cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 1, 1, &perFrameDescriptor, 0, nullptr); diff --git a/samples/04_scenes/CMakeLists.txt b/samples/04_scenes/CMakeLists.txt new file mode 100644 index 0000000..ef40a12 --- /dev/null +++ b/samples/04_scenes/CMakeLists.txt @@ -0,0 +1,29 @@ +# CMakeList.txt ; CMake project for box + +cmake_minimum_required(VERSION 3.13) + +#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined -fsanitize=address") +find_path(TINYGLTF_INCLUDE_DIRS "tiny_gltf.h") + +find_package(EnTT REQUIRED CONFIG) + +add_executable(scene_render main.cpp + render_resource_manager.cpp render_resource_manager.h + asset_loader.cpp asset_loader.h + pipeline_utils.cpp pipeline_utils.h + core_components.h + ecs_adapter.h 
+ camera.h) + +add_shader(scene_render shader/model.frag.glsl) +add_shader(scene_render shader/model.vert.glsl) +# add_shader(scene_render shader/model.vs.hlsl) + +target_link_libraries(scene_render PRIVATE aster_core) +target_link_libraries(scene_render PRIVATE util_helper) +target_link_libraries(scene_render PRIVATE EnTT::EnTT) + +target_include_directories(scene_render PRIVATE ${TINYGLTF_INCLUDE_DIRS}) + +add_resource_dir(scene_render model) +add_resource_dir(scene_render image) diff --git a/samples/04_scenes/asset_loader.cpp b/samples/04_scenes/asset_loader.cpp new file mode 100644 index 0000000..cf7d5b9 --- /dev/null +++ b/samples/04_scenes/asset_loader.cpp @@ -0,0 +1,1070 @@ +// ============================================= +// Aster: asset_loader.cpp +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#define TINYGLTF_NOEXCEPTION +#define JSON_NOEXCEPTION + +#define TINYGLTF_IMPLEMENTATION +#define STB_IMAGE_IMPLEMENTATION +#define STB_IMAGE_WRITE_IMPLEMENTATION + +#include "asset_loader.h" + +#include "buffer.h" +#include "device.h" +#include "image.h" + +#include "core_components.h" +#include "helpers.h" +#include "render_resource_manager.h" + +#include +#include +#include +#include +#include + +#include + +#if defined(LoadImage) +#undef LoadImage +#endif + +struct Nodes; +constexpr vk::CommandBufferBeginInfo OneTimeCmdBeginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit}; + +vec4 +VectorToVec4(const std::vector &vec) +{ + if (vec.empty()) + { + return vec4{0.0f}; + } + assert(vec.size() == 4); + + return glm::make_vec4(vec.data()); +} + +vec3 +VectorToVec3(const std::vector &vec, const f32 defaultScalar = 0.0f) +{ + if (vec.empty()) + { + return vec3{defaultScalar}; + } + assert(vec.size() == 3); + + return glm::make_vec3(vec.data()); +} + +quat +VectorToQuat(const std::vector &vec) +{ + if (vec.empty()) + { + return glm::identity(); + } + assert(vec.size() == 4); + + return 
glm::make_quat(vec.data()); +} + +void +Model::Destroy(RenderResourceManager *resourceManager, EcsRegistry *registry) +{ + for (auto texture : m_Textures) + { + resourceManager->Release(texture); + } + m_Textures.clear(); + + registry->destroy(m_Entities.begin(), m_Entities.end()); + m_Entities.clear(); + + resourceManager->Release(Take(m_IndexHandle)); + resourceManager->Release(Take(m_VertexDataHandle)); + resourceManager->Release(Take(m_VertexPositionHandle)); + resourceManager->Release(Take(m_MaterialHandle)); + + m_RootEntity = {}; +} + +void +AssetLoader::LoadHdrImage(Texture *texture, cstr path, cstr name) const +{ + const Device *pDevice = m_ResourceManager->m_Device; + ERROR_IF(texture->IsValid(), "Expected invalid image.") THEN_ABORT(-1); + + i32 x, y, nChannels; + f32 *data = stbi_loadf(path, &x, &y, &nChannels, 4); + assert(nChannels == 3); + + ERROR_IF(!data, "Could not load {}", path) THEN_ABORT(-1); + + u32 width = Cast(x); + u32 height = Cast(y); + + StagingBuffer stagingBuffer; + texture->Init(m_ResourceManager->m_Device, {width, height}, vk::Format::eR32G32B32A32Sfloat, false, path); + assert(texture->IsValid()); + stagingBuffer.Init(m_ResourceManager->m_Device, (sizeof *data) * x * y * 4, "HDR Staging Buffer"); + stagingBuffer.Write(m_ResourceManager->m_Device, 0, stagingBuffer.GetSize(), data); + + stbi_image_free(data); + +#pragma region Setup Copy/Sync primitives + vk::BufferImageCopy2 copyRegion = { + .bufferOffset = 0, + .bufferRowLength = width, + .bufferImageHeight = height, + .imageSubresource = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1, + }, + .imageOffset = {0, 0, 0}, + .imageExtent = texture->m_Extent, + }; + vk::CopyBufferToImageInfo2 stagingInfo = { + .srcBuffer = stagingBuffer.m_Buffer, + .dstImage = texture->m_Image, + .dstImageLayout = vk::ImageLayout::eTransferDstOptimal, + .regionCount = 1, + .pRegions = &copyRegion, + }; + vk::ImageMemoryBarrier2 readyToStageBarrier = 
{ + .srcStageMask = vk::PipelineStageFlagBits2::eAllCommands, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer, + .dstAccessMask = vk::AccessFlagBits2::eTransferWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eTransferDstOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = texture->m_Image, + .subresourceRange = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + vk::DependencyInfo readyToStageDependency = { + .memoryBarrierCount = 0, + .bufferMemoryBarrierCount = 0, + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &readyToStageBarrier, + }; + vk::ImageMemoryBarrier2 postStagingBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer, + .srcAccessMask = vk::AccessFlagBits2::eTransferWrite, + .dstStageMask = vk::PipelineStageFlagBits2::eFragmentShader | vk::PipelineStageFlagBits2::eComputeShader, + .dstAccessMask = vk::AccessFlagBits2::eShaderRead, + .oldLayout = vk::ImageLayout::eTransferDstOptimal, + .newLayout = vk::ImageLayout::eShaderReadOnlyOptimal, + .srcQueueFamilyIndex = m_TransferQueueIndex, + .dstQueueFamilyIndex = m_GraphicsQueueIndex, + .image = texture->m_Image, + .subresourceRange = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + vk::DependencyInfo postStagingDependency = { + .memoryBarrierCount = 0, + .bufferMemoryBarrierCount = 0, + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &postStagingBarrier, + }; +#pragma endregion + + AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo)); + +#if !defined(ASTER_NDEBUG) + StackString<128> loadActionName = "Load: "; + loadActionName += name ? 
name : path; + vk::DebugUtilsLabelEXT debugLabel = { + .pLabelName = loadActionName.c_str(), + .color = std::array{1.0f, 1.0f, 1.0f, 1.0f}, + }; + m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel); +#endif + + m_CommandBuffer.pipelineBarrier2(&readyToStageDependency); + m_CommandBuffer.copyBufferToImage2(&stagingInfo); + m_CommandBuffer.pipelineBarrier2(&postStagingDependency); + +#if !defined(ASTER_NDEBUG) + m_CommandBuffer.endDebugUtilsLabelEXT(); +#endif + + AbortIfFailed(m_CommandBuffer.end()); + + vk::SubmitInfo submitInfo = { + .waitSemaphoreCount = 0, + .pWaitDstStageMask = nullptr, + .commandBufferCount = 1, + .pCommandBuffers = &m_CommandBuffer, + }; + + vk::Fence fence; + vk::FenceCreateInfo fenceCreateInfo = {}; + AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence)); + AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence)); + AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue)); + pDevice->m_Device.destroy(fence, nullptr); + + AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {})); + + stagingBuffer.Destroy(pDevice); +} + +void +GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayout initialLayout, + vk::ImageLayout finalLayout, vk::PipelineStageFlags2 prevStage, vk::PipelineStageFlags2 finalStage) +{ +#if !defined(ASTER_NDEBUG) + vk::DebugUtilsLabelEXT label = { + .pLabelName = "Generate Mipmap", + .color = std::array{0.9f, 0.9f, 0.9f, 1.0f}, + }; + commandBuffer.beginDebugUtilsLabelEXT(&label); +#endif + + vk::ImageMemoryBarrier2 imageStartBarrier = { + .srcStageMask = prevStage, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eTransfer, + .dstAccessMask = vk::AccessFlagBits2::eTransferRead, + .oldLayout = initialLayout, + .newLayout = vk::ImageLayout::eTransferSrcOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = texture->m_Image, + 
.subresourceRange = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = texture->m_LayerCount, + }, + }; + vk::ImageMemoryBarrier2 mipsStartBarrier = imageStartBarrier; + mipsStartBarrier.dstAccessMask = vk::AccessFlagBits2::eTransferWrite; + mipsStartBarrier.oldLayout = vk::ImageLayout::eUndefined; + mipsStartBarrier.newLayout = vk::ImageLayout::eTransferDstOptimal; + mipsStartBarrier.subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 1, + .levelCount = texture->GetMipLevels() - 1, + .baseArrayLayer = 0, + .layerCount = texture->m_LayerCount, + }; + eastl::fixed_vector startBarriers = { + mipsStartBarrier, + }; + if (initialLayout != imageStartBarrier.newLayout) + { + startBarriers.push_back(imageStartBarrier); + } + + vk::DependencyInfo imageStartDependency = { + .imageMemoryBarrierCount = Cast(startBarriers.size()), + .pImageMemoryBarriers = startBarriers.data(), + }; + + vk::ImageMemoryBarrier2 nextMipBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eTransfer, + .srcAccessMask = vk::AccessFlagBits2::eTransferWrite, + .dstStageMask = vk::PipelineStageFlagBits2::eTransfer, + .dstAccessMask = vk::AccessFlagBits2::eTransferRead, + .oldLayout = vk::ImageLayout::eTransferDstOptimal, + .newLayout = vk::ImageLayout::eTransferSrcOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = texture->m_Image, + .subresourceRange = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = texture->m_LayerCount, + }, + }; + vk::DependencyInfo interMipDependency = { + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &nextMipBarrier, + }; + + vk::ImageMemoryBarrier2 imageReadyBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eTransfer, + .srcAccessMask = vk::AccessFlagBits2::eTransferWrite, + .dstStageMask = 
finalStage, + .dstAccessMask = vk::AccessFlagBits2::eShaderRead, + .oldLayout = vk::ImageLayout::eTransferSrcOptimal, + .newLayout = finalLayout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = texture->m_Image, + .subresourceRange = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = texture->GetMipLevels(), + .baseArrayLayer = 0, + .layerCount = texture->m_LayerCount, + }, + }; + vk::DependencyInfo imageReadyDependency = { + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &imageReadyBarrier, + }; + + vk::ImageBlit2 blitRegion = { + .srcSubresource = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseArrayLayer = 0, + .layerCount = texture->m_LayerCount, + }, + .dstSubresource = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseArrayLayer = 0, + .layerCount = texture->m_LayerCount, + }, + }; + + vk::BlitImageInfo2 mipBlitInfo = { + .srcImage = texture->m_Image, + .srcImageLayout = vk::ImageLayout::eTransferSrcOptimal, + .dstImage = texture->m_Image, + .dstImageLayout = vk::ImageLayout::eTransferDstOptimal, + .regionCount = 1, + .pRegions = &blitRegion, + .filter = vk::Filter::eLinear, + }; + + auto calcNextMip = [](i32 prev) { return eastl::max(prev / 2, 1); }; + + // Mip Mapping + + commandBuffer.pipelineBarrier2(&imageStartDependency); + + i32 prevMipWidth = Cast(texture->m_Extent.width); + i32 prevMipHeight = Cast(texture->m_Extent.height); + + u32 maxPrevMip = texture->GetMipLevels() - 1; + for (u32 prevMipLevel = 0; prevMipLevel < maxPrevMip; ++prevMipLevel) + { + i32 currentMipWidth = calcNextMip(prevMipWidth); + i32 currentMipHeight = calcNextMip(prevMipHeight); + u32 currentMipLevel = prevMipLevel + 1; + + blitRegion.srcSubresource.mipLevel = prevMipLevel; + blitRegion.srcOffsets = std::array{ + vk::Offset3D{0, 0, 0}, + vk::Offset3D{prevMipWidth, prevMipHeight, 1}, + }; + blitRegion.dstSubresource.mipLevel = currentMipLevel; + 
blitRegion.dstOffsets = std::array{ + vk::Offset3D{0, 0, 0}, + vk::Offset3D{currentMipWidth, currentMipHeight, 1}, + }; + + nextMipBarrier.subresourceRange.baseMipLevel = currentMipLevel; + + commandBuffer.blitImage2(&mipBlitInfo); + commandBuffer.pipelineBarrier2(&interMipDependency); + + prevMipHeight = currentMipHeight; + prevMipWidth = currentMipWidth; + } + + commandBuffer.pipelineBarrier2(&imageReadyDependency); +#if !defined(ASTER_NDEBUG) + commandBuffer.endDebugUtilsLabelEXT(); +#endif +} + +TextureHandle +AssetLoader::LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image, bool isSrgb) const +{ + assert(image->component == 4); + assert(image->height > 0 && image->width > 0); + + u32 height = Cast(image->height); + u32 width = Cast(image->width); + + vk::Format imageFormat = isSrgb ? vk::Format::eR8G8B8A8Srgb : vk::Format::eR8G8B8A8Unorm; + + Texture texture; + + usize byteSize = image->image.size(); + texture.Init(m_ResourceManager->m_Device, {.width = width, .height = height}, imageFormat, true, + image->name.data()); + stagingBuffer->Init(m_ResourceManager->m_Device, byteSize); + stagingBuffer->Write(m_ResourceManager->m_Device, 0, byteSize, image->image.data()); + +#if !defined(ASTER_NDEBUG) + StackString<128> loadActionName = "Load: "; + loadActionName += image->name.empty() ? 
"" : image->name.c_str(); + vk::DebugUtilsLabelEXT debugLabel = { + .pLabelName = loadActionName.c_str(), + .color = std::array{1.0f, 1.0f, 1.0f, 1.0f}, + }; + m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel); +#endif + +#pragma region Barriers and Blits + + vk::ImageMemoryBarrier2 imageStartBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eTransfer, + .dstAccessMask = vk::AccessFlagBits2::eTransferWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eTransferDstOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = texture.m_Image, + .subresourceRange = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + vk::DependencyInfo imageStartDependency = { + .memoryBarrierCount = 0, + .bufferMemoryBarrierCount = 0, + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &imageStartBarrier, + }; + + vk::ImageMemoryBarrier2 postStagingBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer, + .dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer, + .oldLayout = vk::ImageLayout::eTransferDstOptimal, + .newLayout = vk::ImageLayout::eTransferSrcOptimal, + .srcQueueFamilyIndex = m_TransferQueueIndex, + .dstQueueFamilyIndex = m_GraphicsQueueIndex, + .image = texture.m_Image, + .subresourceRange = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }; + ; + + vk::DependencyInfo postStagingDependency = { + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &postStagingBarrier, + }; + + vk::BufferImageCopy2 imageCopy = { + .bufferOffset = 0, + .bufferRowLength = Cast(image->width), + .bufferImageHeight = Cast(image->height), + .imageSubresource = + { + .aspectMask = 
vk::ImageAspectFlagBits::eColor, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1, + }, + .imageOffset = {}, + .imageExtent = texture.m_Extent, + }; + vk::CopyBufferToImageInfo2 stagingCopyInfo = { + .srcBuffer = stagingBuffer->m_Buffer, + .dstImage = texture.m_Image, + .dstImageLayout = vk::ImageLayout::eTransferDstOptimal, + .regionCount = 1, + .pRegions = &imageCopy, + }; + +#pragma endregion + + m_CommandBuffer.pipelineBarrier2(&imageStartDependency); + m_CommandBuffer.copyBufferToImage2(&stagingCopyInfo); + m_CommandBuffer.pipelineBarrier2(&postStagingDependency); + + GenerateMipMaps(m_CommandBuffer, &texture, vk::ImageLayout::eTransferSrcOptimal, + vk::ImageLayout::eShaderReadOnlyOptimal); + +#if !defined(ASTER_NDEBUG) + m_CommandBuffer.endDebugUtilsLabelEXT(); +#endif + + return m_ResourceManager->CommitTexture(&texture); +} + +template +struct CRequiresPostLoadProcess +{ +}; + +void +AssetLoader::ProcessNode(tinygltf::Model *model, eastl::vector *vertexPositions, + eastl::vector *vertexData, eastl::vector *indices, + eastl::vector *entities, const std::function &loadMaterial, int current, + Entity parent) +{ + auto *node = &model->nodes[current]; + + CDynamicTransform dynamicTransform; + if (!node->matrix.empty()) + { + vec3 skew; + vec4 perspective; + mat4 matrix = glm::make_mat4(node->matrix.data()); + glm::decompose(matrix, dynamicTransform.m_Scale, dynamicTransform.m_Rotation, dynamicTransform.m_Position, skew, + perspective); + dynamicTransform.m_Rotation = glm::conjugate(dynamicTransform.m_Rotation); + } + else { + dynamicTransform = { + .m_Position = VectorToVec3(node->translation), + .m_Rotation = VectorToQuat(node->rotation), + .m_Scale = VectorToVec3(node->scale, 1.0f), + }; + } + + auto nodeRoot = m_Registry->create(); + entities->push_back(nodeRoot); + m_Registry->emplace(nodeRoot, dynamicTransform); + m_Registry->emplace(nodeRoot); + m_Registry->emplace>(nodeRoot, parent); + + if (node->mesh >= 0) + { + auto *mesh = 
&model->meshes[node->mesh]; + u32 vertexOffset = Cast(vertexPositions->size()); + u32 indexOffset = Cast(indices->size()); + for (auto &prim : mesh->primitives) + { + u32 vertexCount = 0; + u32 indexCount = 0; + +#pragma region Position + assert(prim.attributes.contains(APosition)); + assert(prim.mode == TINYGLTF_MODE_TRIANGLES); + { + tinygltf::Accessor *posAccessor = &model->accessors[prim.attributes[APosition]]; + + assert(posAccessor->count <= MaxValue); + + tinygltf::BufferView *posBufferView = &model->bufferViews[posAccessor->bufferView]; + tinygltf::Buffer *posBuffer = &model->buffers[posBufferView->buffer]; + usize byteOffset = (posAccessor->byteOffset + posBufferView->byteOffset); + + vertexCount = Cast(posAccessor->count); + vertexPositions->reserve(vertexOffset + vertexCount); + + if (posAccessor->type == TINYGLTF_TYPE_VEC4) + { + vec4 *data = Recast(posBuffer->data.data() + byteOffset); + vertexPositions->insert(vertexPositions->end(), data, data + vertexCount); + } + else if (posAccessor->type == TINYGLTF_TYPE_VEC3) + { + vec3 *data = Recast(posBuffer->data.data() + byteOffset); + for (u32 i = 0; i < vertexCount; ++i) + { + vertexPositions->push_back(vec4(data[i], 1.0f)); + } + } + else if (posAccessor->type == TINYGLTF_TYPE_VEC2) + { + vec2 *data = Recast(posBuffer->data.data() + byteOffset); + for (u32 i = 0; i < vertexCount; ++i) + { + vertexPositions->push_back(vec4(data[i], 0.0f, 1.0f)); + } + } + } +#pragma endregion +#pragma region Vertex Data + vertexData->resize(vertexPositions->size()); + + // Normal Coords + if (prim.attributes.contains(ANormal)) + { + tinygltf::Accessor *normAccessor = &model->accessors[prim.attributes[ANormal]]; + + assert(normAccessor->count <= MaxValue); + + tinygltf::BufferView *normBufferView = &model->bufferViews[normAccessor->bufferView]; + tinygltf::Buffer *normBuffer = &model->buffers[normBufferView->buffer]; + usize byteOffset = (normAccessor->byteOffset + normBufferView->byteOffset); + + if (normAccessor->type == 
TINYGLTF_TYPE_VEC4) + { + vec4 *data = Recast(normBuffer->data.data() + byteOffset); + + vec4 *end = data + vertexCount; + u32 idx = vertexOffset; + vec4 *it = data; + while (it != end) + { + vertexData->at(idx++).m_Normal = *(it++); + } + } + else if (normAccessor->type == TINYGLTF_TYPE_VEC3) + { + vec3 *data = Recast(normBuffer->data.data() + byteOffset); + for (u32 i = 0; i < vertexCount; ++i) + { + auto norm = vec4(data[i], 0.0f); + vertexData->at(vertexOffset + i).m_Normal = norm; + } + } + else if (normAccessor->type == TINYGLTF_TYPE_VEC2) + { + vec2 *data = Recast(normBuffer->data.data() + byteOffset); + for (u32 i = 0; i < vertexCount; ++i) + { + auto norm = vec4(data[i], 0.0f, 0.0f); + vertexData->at(vertexOffset + i).m_Normal = norm; + } + } + } + + // UV0 + if (prim.attributes.contains(ATexCoord0)) + { + tinygltf::Accessor *uvAccessor = &model->accessors[prim.attributes[ATexCoord0]]; + + assert(uvAccessor->count <= MaxValue); + + tinygltf::BufferView *uvBufferView = &model->bufferViews[uvAccessor->bufferView]; + tinygltf::Buffer *uvBuffer = &model->buffers[uvBufferView->buffer]; + usize byteOffset = (uvAccessor->byteOffset + uvBufferView->byteOffset); + + assert(uvAccessor->type == TINYGLTF_TYPE_VEC2 && + uvAccessor->componentType == TINYGLTF_COMPONENT_TYPE_FLOAT); + { + vec2 *data = Recast(uvBuffer->data.data() + byteOffset); + vec2 *end = data + vertexCount; + u32 idx = vertexOffset; + vec2 *it = data; + while (it != end) + { + vertexData->at(idx++).m_TexCoord0 = *(it++); + } + } + } + + if (prim.attributes.contains(AColor0)) + { + tinygltf::Accessor *colorAccessor = &model->accessors[prim.attributes[AColor0]]; + + assert(colorAccessor->count <= MaxValue); + + tinygltf::BufferView *colorBufferView = &model->bufferViews[colorAccessor->bufferView]; + tinygltf::Buffer *colorBuffer = &model->buffers[colorBufferView->buffer]; + usize byteOffset = (colorAccessor->byteOffset + colorBufferView->byteOffset); + + if (colorAccessor->type == TINYGLTF_TYPE_VEC4) + 
{ + vec4 *data = Recast(colorBuffer->data.data() + byteOffset); + + vec4 *end = data + vertexCount; + u32 idx = vertexOffset; + vec4 *it = data; + while (it != end) + { + vertexData->at(idx++).m_Color0 = *(it++); + } + } + else if (colorAccessor->type == TINYGLTF_TYPE_VEC3) + { + vec3 *data = Recast(colorBuffer->data.data() + byteOffset); + for (u32 i = 0; i < vertexCount; ++i) + { + auto color = vec4(data[i], 1.0f); + vertexData->at(vertexOffset + i).m_Color0 = color; + } + } + } +#pragma endregion +#pragma region Indices + // Indices + if (prim.indices >= 0) + { + tinygltf::Accessor *indexAccessor = &model->accessors[prim.indices]; + + assert(indexAccessor->count <= MaxValue); + + tinygltf::BufferView *indexBufferView = &model->bufferViews[indexAccessor->bufferView]; + tinygltf::Buffer *indexBuffer = &model->buffers[indexBufferView->buffer]; + usize byteOffset = (indexAccessor->byteOffset + indexBufferView->byteOffset); + + indexCount = Cast(indexAccessor->count); + indices->reserve(indexOffset + indexCount); + + if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT) + { + u32 *data = Recast(indexBuffer->data.data() + byteOffset); + indices->insert(indices->end(), data, data + indexCount); + } + else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT) + { + u16 *data = Recast(indexBuffer->data.data() + byteOffset); + indices->insert(indices->end(), data, data + indexCount); + } + else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE) + { + u8 *data = Recast(indexBuffer->data.data() + byteOffset); + indices->insert(indices->end(), data, data + indexCount); + } + } + else + { + indexCount = vertexCount; + indices->reserve(indexOffset + vertexCount); + for (u32 i = 0; i < indexCount; ++i) + { + indices->push_back(i); + } + } +#pragma endregion + + auto entity = m_Registry->create(); + entities->push_back(entity); + + m_Registry->emplace(entity, CMesh{ + .m_VertexPositionPtr = vertexOffset * sizeof 
vec4, + .m_VertexDataPtr = vertexOffset * sizeof VertexData, + .m_FirstIndex = indexOffset, + .m_IndexCount = indexCount, + }); + m_Registry->emplace>(entity); + m_Registry->emplace(entity); + m_Registry->emplace(entity); + m_Registry->emplace>(entity, nodeRoot); + if (prim.material >= 0) + { + m_Registry->emplace(entity, sizeof Material * loadMaterial(prim.material)); + m_Registry->emplace>(entity); + } + + vertexOffset += vertexCount; + indexOffset += indexCount; + } + } + + for (auto childIdx : node->children) + { + ProcessNode(model, vertexPositions, vertexData, indices, entities, loadMaterial, childIdx, nodeRoot); + } +} + +Model +AssetLoader::LoadModelToGpu(cstr path, cstr name) +{ + namespace fs = std::filesystem; + tinygltf::Model model; + tinygltf::TinyGLTF loader; + + const Device *pDevice = m_ResourceManager->m_Device; + + const auto fsPath = fs::absolute(path); + const auto ext = fsPath.extension(); + if (ext == GLTF_ASCII_FILE_EXTENSION) + { + std::string err; + std::string warn; + if (loader.LoadASCIIFromFile(&model, &err, &warn, fsPath.generic_string())) + { + ERROR_IF(!err.empty(), "{}", err) + ELSE_IF_WARN(!warn.empty(), "{}", warn); + } + } + if (ext == GLTF_BINARY_FILE_EXTENSION) + { + std::string err; + std::string warn; + if (loader.LoadBinaryFromFile(&model, &err, &warn, fsPath.generic_string())) + { + ERROR_IF(!err.empty(), "{}", err) + ELSE_IF_WARN(!warn.empty(), "{}", warn); + } + } + + AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo)); + +#if !defined(ASTER_NDEBUG) + StackString<128> loadActionName = "Load: "; + loadActionName += name ? 
name : path; + vk::DebugUtilsLabelEXT debugLabel = { + .pLabelName = loadActionName.c_str(), + .color = std::array{1.0f, 1.0f, 1.0f, 1.0f}, + }; + m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel); +#endif + + eastl::vector stagingBuffers; + + // TODO: Mesh reordering based on nodes AND OR meshoptimizer + // TODO: Support scenes + + eastl::vector vertexPositions; + eastl::vector vertexData; + eastl::vector indices; + eastl::hash_map textureHandleMap; + eastl::hash_map materialsIndirection; + eastl::vector materials; + eastl::vector entities; + + auto getTextureHandle = [this, &textureHandleMap, &stagingBuffers, &model](i32 index, + bool isSrgb) -> TextureHandle { + if (index < 0) + { + return {}; + } + const auto iter = textureHandleMap.find(index); + if (iter != textureHandleMap.end()) + { + return iter->second; + } + + auto *image = &model.images[index]; + TextureHandle handle = LoadImageToGpu(&stagingBuffers.push_back(), image, isSrgb); + textureHandleMap.emplace(index, handle); + return handle; + }; + auto loadMaterial = [&materials, &getTextureHandle, &model, &materialsIndirection](i32 materialIdx) -> u32 { + auto materialFind = materialsIndirection.find(materialIdx); + if (materialFind != materialsIndirection.end()) + { + return materialFind->second; + } + + u32 index = Cast(materials.size()); + auto *material = &model.materials[materialIdx]; + materials.push_back({ + .m_AlbedoFactor = VectorToVec4(material->pbrMetallicRoughness.baseColorFactor), + .m_EmissionFactor = VectorToVec3(material->emissiveFactor), + .m_MetalFactor = Cast(material->pbrMetallicRoughness.metallicFactor), + .m_RoughFactor = Cast(material->pbrMetallicRoughness.roughnessFactor), + .m_AlbedoTex = getTextureHandle(material->pbrMetallicRoughness.baseColorTexture.index, true), + .m_NormalTex = getTextureHandle(material->normalTexture.index, false), + .m_MetalRoughTex = getTextureHandle(material->pbrMetallicRoughness.metallicRoughnessTexture.index, false), + .m_OcclusionTex = 
getTextureHandle(material->occlusionTexture.index, false), + .m_EmissionTex = getTextureHandle(material->emissiveTexture.index, true), + }); + materialsIndirection[materialIdx] = index; + return index; + }; + + Entity modelRootEntity = m_Registry->create(); + m_Registry->emplace(modelRootEntity); + m_Registry->emplace(modelRootEntity); + entities.push_back(modelRootEntity); + + assert(model.defaultScene >= 0); + { + auto *scene = &model.scenes[model.defaultScene]; + for (auto nodeIdx : scene->nodes) + { + ProcessNode(&model, &vertexPositions, &vertexData, &indices, &entities, loadMaterial, nodeIdx, + modelRootEntity); + } + } + +#pragma region Staging / Transfer / Uploads + IndexHandle indexHandle; + + GeometryHandle vertexPositionHandle; + GeometryHandle vertexDataHandle; + MaterialHandle materialsHandle; + { + // TODO: Make this work on non-ReBAR via transfers. + auto uploadBufferData = [cmd = this->m_CommandBuffer, &stagingBuffers, pDevice](const Buffer *buffer, + const void *data) { + vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = buffer->GetSize()}; + StagingBuffer &stagingBuffer = stagingBuffers.push_back(); + stagingBuffer.Init(pDevice, bufferCopy.size); + stagingBuffer.Write(pDevice, 0, bufferCopy.size, data); + cmd.copyBuffer(stagingBuffer.m_Buffer, buffer->m_Buffer, 1, &bufferCopy); + }; + + usize vertexPositionsByteSize = vertexPositions.size() * sizeof vertexPositions[0]; + usize vertexPositionsBaseAddr; + vertexPositionHandle = + m_ResourceManager->CreateGeometryBuffer(vertexPositionsByteSize, alignof(vec4), &vertexPositionsBaseAddr); + m_ResourceManager->Write(vertexPositionHandle, 0, vertexPositionsByteSize, vertexPositions.data()); + + usize vertexDataByteSize = vertexData.size() * sizeof vertexData[0]; + usize vertexDataBaseAddr; + vertexDataHandle = + m_ResourceManager->CreateGeometryBuffer(vertexDataByteSize, alignof(VertexData), &vertexDataBaseAddr); + m_ResourceManager->Write(vertexDataHandle, 0, vertexDataByteSize, 
vertexData.data()); + + usize materialsByteSize = materials.size() * sizeof materials[0]; + usize materialsBaseAddr; + materialsHandle = + m_ResourceManager->CreateMaterialBuffer(materialsByteSize, alignof(VertexData), &materialsBaseAddr); + m_ResourceManager->Write(materialsHandle, 0, materialsByteSize, materials.data()); + + usize indexByteSize = indices.size() * sizeof indices[0]; + u32 firstIndex; + indexHandle = m_ResourceManager->CreateIndexBuffer(indexByteSize, alignof(u32), &firstIndex); + m_ResourceManager->Write(indexHandle, 0, indexByteSize, indices.data()); + + // TODO(Bob): Replace ByteOffsets with BufferAddress. + auto postProcessMeshView = m_Registry->view>(); + for (auto [meshEntity, mesh] : postProcessMeshView.each()) + { + mesh.m_FirstIndex += firstIndex; + mesh.m_VertexDataPtr += vertexDataBaseAddr; + mesh.m_VertexPositionPtr += vertexPositionsBaseAddr; + m_Registry->remove>(meshEntity); + } + auto postProcessMaterialView = m_Registry->view>(); + for (auto [materialEntity, material] : postProcessMaterialView.each()) + { + material.m_MaterialPtr += materialsBaseAddr; + m_Registry->remove>(materialEntity); + } + } + +#pragma endregion + +#if !defined(ASTER_NDEBUG) + m_CommandBuffer.endDebugUtilsLabelEXT(); +#endif + AbortIfFailed(m_CommandBuffer.end()); + + vk::SubmitInfo submitInfo = { + .waitSemaphoreCount = 0, + .pWaitDstStageMask = nullptr, + .commandBufferCount = 1, + .pCommandBuffers = &m_CommandBuffer, + }; + + vk::Fence fence; + vk::FenceCreateInfo fenceCreateInfo = {}; + AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence)); + AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence)); + AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue)); + pDevice->m_Device.destroy(fence, nullptr); + + AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {})); + + for (auto &buffer : stagingBuffers) + { + buffer.Destroy(pDevice); + } + + eastl::vector textureHandles; + 
textureHandles.reserve(textureHandleMap.size()); + + for (auto &[key, val] : textureHandleMap) + { + textureHandles.emplace_back(val); + } + +#if !defined(ASTER_NDEBUG) + assert(m_Registry->view>().empty()); + assert(m_Registry->view>().empty()); + auto x0 = m_Registry->view>(); + auto x1 = m_Registry->view>(); + assert(x0.begin() == x0.end()); + assert(x1.begin() == x1.end()); +#endif + + // TODO("Model Handle needs to be returned. Ideally a single node with model component."); + + return Model{ + .m_Textures = std::move(textureHandles), + .m_Entities = std::move(entities), + .m_IndexHandle = indexHandle, + .m_VertexPositionHandle = vertexPositionHandle, + .m_VertexDataHandle = vertexDataHandle, + .m_MaterialHandle = materialsHandle, + .m_RootEntity = modelRootEntity, + }; +} + +AssetLoader::AssetLoader(RenderResourceManager *resourceManager, EcsRegistry *registry, vk::Queue transferQueue, + u32 transferQueueIndex, u32 graphicsQueueIndex) + : m_ResourceManager(resourceManager) + , m_Registry(registry) + , m_TransferQueue(transferQueue) + , m_TransferQueueIndex(transferQueueIndex) + , m_GraphicsQueueIndex(graphicsQueueIndex) +{ + const Device *pDevice = resourceManager->m_Device; + const vk::CommandPoolCreateInfo poolCreateInfo = { + .flags = vk::CommandPoolCreateFlagBits::eTransient, + .queueFamilyIndex = transferQueueIndex, + }; + AbortIfFailedM(pDevice->m_Device.createCommandPool(&poolCreateInfo, nullptr, &m_CommandPool), + "Transfer command pool creation failed."); + + pDevice->SetName(m_CommandPool, "Asset Loader Command Pool"); + + const vk::CommandBufferAllocateInfo commandBufferAllocateInfo = { + .commandPool = m_CommandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = 1, + }; + AbortIfFailed(pDevice->m_Device.allocateCommandBuffers(&commandBufferAllocateInfo, &m_CommandBuffer)); + + pDevice->SetName(m_CommandBuffer, "Asset Loader Command Buffer"); +} + +AssetLoader::~AssetLoader() +{ + if (m_ResourceManager) + { + 
m_ResourceManager->m_Device->m_Device.destroy(m_CommandPool, nullptr); + } +} + +AssetLoader::AssetLoader(AssetLoader &&other) noexcept + : m_ResourceManager(Take(other.m_ResourceManager)) + , m_Registry(other.m_Registry) + , m_CommandPool(other.m_CommandPool) + , m_CommandBuffer(other.m_CommandBuffer) + , m_TransferQueue(other.m_TransferQueue) + , m_TransferQueueIndex(other.m_TransferQueueIndex) + , m_GraphicsQueueIndex(other.m_GraphicsQueueIndex) +{ +} + +AssetLoader & +AssetLoader::operator=(AssetLoader &&other) noexcept +{ + if (this == &other) + return *this; + m_ResourceManager = Take(other.m_ResourceManager); + m_Registry = Take(other.m_Registry); + m_CommandPool = other.m_CommandPool; + m_CommandBuffer = other.m_CommandBuffer; + m_TransferQueue = other.m_TransferQueue; + m_TransferQueueIndex = other.m_TransferQueueIndex; + m_GraphicsQueueIndex = other.m_GraphicsQueueIndex; + return *this; +} \ No newline at end of file diff --git a/samples/04_scenes/asset_loader.h b/samples/04_scenes/asset_loader.h new file mode 100644 index 0000000..8ea83a5 --- /dev/null +++ b/samples/04_scenes/asset_loader.h @@ -0,0 +1,111 @@ +// ============================================= +// Aster: asset_loader.h +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#pragma once + +#include "global.h" + +#include "buffer.h" +#include "render_resource_manager.h" + +#include "ecs_adapter.h" + +namespace tinygltf +{ +class Model; +struct Image; +} // namespace tinygltf + +struct Image; +struct Texture; + +constexpr auto GLTF_ASCII_FILE_EXTENSION = ".gltf"; +constexpr auto GLTF_BINARY_FILE_EXTENSION = ".glb"; + +struct Material +{ + vec4 m_AlbedoFactor; // 16 16 + vec3 m_EmissionFactor; // 12 28 + f32 m_MetalFactor; // 04 32 + f32 m_RoughFactor; // 04 36 + TextureHandle m_AlbedoTex; // 04 40 + TextureHandle m_NormalTex; // 04 44 + TextureHandle m_MetalRoughTex; // 04 48 + TextureHandle m_OcclusionTex; // 04 52 + TextureHandle m_EmissionTex; // 04 56 
+ + static constexpr usize ALIGNMENT = 4; +}; + +static_assert(sizeof(Material) == 56); + +struct VertexData +{ + vec4 m_Normal; + vec2 m_TexCoord0 = vec2{0.0f, 0.0f}; + vec2 m_TexCoord1 = vec2{0.0f, 0.0f}; + vec4 m_Color0 = vec4{1.0f, 1.0f, 1.0f, 1.0f}; + + static constexpr usize ALIGNMENT = 16; +}; + +struct Model +{ + eastl::vector m_Textures; + eastl::vector m_Entities; + + IndexHandle m_IndexHandle; + GeometryHandle m_VertexPositionHandle; + GeometryHandle m_VertexDataHandle; + MaterialHandle m_MaterialHandle; + Entity m_RootEntity; + + void Destroy(RenderResourceManager *resourceManager, EcsRegistry *registry); +}; + +struct AssetLoader +{ + RenderResourceManager *m_ResourceManager; + entt::registry *m_Registry; + + vk::CommandPool m_CommandPool; + vk::CommandBuffer m_CommandBuffer; + vk::Queue m_TransferQueue; + u32 m_TransferQueueIndex; + u32 m_GraphicsQueueIndex; + + void LoadHdrImage(Texture *texture, cstr path, cstr name = nullptr) const; + Model LoadModelToGpu(cstr path, cstr name = nullptr); + + constexpr static auto ANormal = "NORMAL"; + constexpr static auto APosition = "POSITION"; + constexpr static auto ATangent = "TANGENT"; + constexpr static auto ATexCoord0 = "TEXCOORD_0"; + constexpr static auto ATexCoord1 = "TEXCOORD_1"; + constexpr static auto AColor0 = "COLOR_0"; + constexpr static auto AJoints0 = "JOINTS_0"; + constexpr static auto AWeights0 = "WEIGHTS_0"; + + AssetLoader(RenderResourceManager *resourceManager, EcsRegistry *registry, vk::Queue transferQueue, + u32 transferQueueIndex, u32 graphicsQueueIndex); + ~AssetLoader(); + + AssetLoader(AssetLoader &&other) noexcept; + AssetLoader &operator=(AssetLoader &&other) noexcept; + + private: + TextureHandle LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image, bool isSrgb) const; + void + ProcessNode(tinygltf::Model *model, eastl::vector *vertexPositions, eastl::vector *vertexData, + eastl::vector *indices, eastl::vector *entities, const std::function &loadMaterial, int current, 
Entity parent); + + public: + DISALLOW_COPY_AND_ASSIGN(AssetLoader); +}; + +void GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayout initialLayout, + vk::ImageLayout finalLayout, + vk::PipelineStageFlags2 prevStage = vk::PipelineStageFlagBits2::eAllCommands, + vk::PipelineStageFlags2 finalStage = vk::PipelineStageFlagBits2::eAllCommands); \ No newline at end of file diff --git a/samples/04_scenes/camera.h b/samples/04_scenes/camera.h new file mode 100644 index 0000000..72ab822 --- /dev/null +++ b/samples/04_scenes/camera.h @@ -0,0 +1,98 @@ +// ============================================= +// Aster: camera.h +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#include "global.h" + +struct Camera +{ + mat4 m_View; + mat4 m_Perspective; + mat4 m_InverseView; + mat4 m_InversePerspective; + vec3 m_Position; + f32 m_PositionHomogenousPad_ = 1.0f; + + void + CalculateInverses() + { + m_InverseView = inverse(m_View); + m_InversePerspective = inverse(m_Perspective); + } +}; + +struct CameraController +{ + constexpr static vec3 UP = vec3(0.0f, 1.0f, 0.0f); + + f32 m_Fov; + f32 m_Pitch; + f32 m_Yaw; + f32 m_AspectRatio; + + Camera m_Camera; + + CameraController(const vec3 &position, const vec3 &target, const f32 vFov, const f32 aspectRatio) + : m_Fov(vFov) + , m_Pitch{0.0f} + , m_Yaw{0.0f} + , m_AspectRatio{aspectRatio} + , m_Camera{ + .m_View = lookAt(position, target, UP), + .m_Perspective = glm::perspective(vFov, aspectRatio, 0.1f, 100.0f), + .m_Position = position, + } + { + const vec3 dir = normalize(target - vec3(position)); + m_Pitch = asin(dir.y); + m_Yaw = acos(-dir.z / sqrt(1.0f - dir.y * dir.y)); + m_Camera.CalculateInverses(); + } + + void + SetAspectRatio(const f32 aspectRatio) + { + m_AspectRatio = aspectRatio; + m_Camera.m_Perspective = glm::perspective(m_Fov, aspectRatio, 0.1f, 100.0f); + + m_Camera.CalculateInverses(); + } + + void + SetPosition(const vec3 &position) + { + 
m_Camera.m_Position = vec4(position, 1.0f); + + f32 cosPitch = cos(m_Pitch); + const vec3 target = vec3(sin(m_Yaw) * cosPitch, sin(m_Pitch), -cos(m_Yaw) * cosPitch); + m_Camera.m_View = lookAt(position, position + target, UP); + + m_Camera.CalculateInverses(); + } + + void + SetPitchYaw(f32 pitch, f32 yaw) + { + m_Pitch = pitch; + m_Yaw = yaw; + + f32 cosPitch = cos(m_Pitch); + const vec3 target = vec3(sin(m_Yaw) * cosPitch, sin(m_Pitch), -cos(m_Yaw) * cosPitch); + const vec3 position = m_Camera.m_Position; + m_Camera.m_View = lookAt(position, position + target, UP); + + m_Camera.CalculateInverses(); + } + + void + SetLookAt(const vec3 &target) + { + const vec3 dir = normalize(target - m_Camera.m_Position); + m_Pitch = acos(dir.y); + m_Yaw = acos(dir.z / sqrt(1.0f - dir.y * dir.y)); + m_Camera.m_View = lookAt(m_Camera.m_Position, m_Camera.m_Position + target, UP); + + m_Camera.CalculateInverses(); + } +}; diff --git a/samples/04_scenes/core_components.h b/samples/04_scenes/core_components.h new file mode 100644 index 0000000..7bdc91e --- /dev/null +++ b/samples/04_scenes/core_components.h @@ -0,0 +1,49 @@ +// ============================================= +// Aster: core_components.h +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#pragma once + +#include "global.h" + +template +struct CDirty +{ + using RelatedComponentType = TComponent; +}; + +template +struct CParent +{ + using RelatedComponentType = TComponent; + entt::entity m_ParentEntity = NULL_ENTITY; +}; + +struct CDynamicTransform +{ + vec3 m_Position = vec3{0.0f}; + quat m_Rotation = glm::identity(); + vec3 m_Scale = vec3{1.0f}; +}; + +struct CStaticTransform +{}; + +struct CGlobalTransform +{ + mat4 m_Transform = glm::identity(); +}; + +struct CMaterial +{ + uptr m_MaterialPtr; +}; + +struct CMesh +{ + uptr m_VertexPositionPtr; + uptr m_VertexDataPtr; + u32 m_FirstIndex; + u32 m_IndexCount; +}; \ No newline at end of file diff --git 
a/samples/04_scenes/ecs_adapter.h b/samples/04_scenes/ecs_adapter.h new file mode 100644 index 0000000..3cc09bf --- /dev/null +++ b/samples/04_scenes/ecs_adapter.h @@ -0,0 +1,21 @@ +// ============================================= +// Aster: ecs_adapter.h +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#pragma once + +#include + +using EcsRegistry = entt::registry; +using Entity = entt::entity; +template +using Without = entt::exclude_t; + +[[nodiscard]] +inline bool Exists(Entity entity) +{ + return entity != entt::null; +} + +constexpr Entity NULL_ENTITY = entt::null; \ No newline at end of file diff --git a/samples/04_scenes/image/.gitattributes b/samples/04_scenes/image/.gitattributes new file mode 100644 index 0000000..77a7139 --- /dev/null +++ b/samples/04_scenes/image/.gitattributes @@ -0,0 +1,2 @@ +*.hdr filter=lfs diff=lfs merge=lfs -text +*.exr filter=lfs diff=lfs merge=lfs -text diff --git a/samples/04_scenes/main.cpp b/samples/04_scenes/main.cpp new file mode 100644 index 0000000..db43aef --- /dev/null +++ b/samples/04_scenes/main.cpp @@ -0,0 +1,529 @@ +// ============================================= +// Aster: main.cpp +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#include "context.h" +#include "device.h" +#include "helpers.h" +#include "physical_device.h" +#include "render_resource_manager.h" +#include "swapchain.h" +#include "window.h" + +#include "asset_loader.h" +#include "camera.h" +#include "core_components.h" + +#include "ecs_adapter.h" +#include "frame.h" +#include "image.h" +#include "pipeline.h" + +#include "pipeline_utils.h" + +constexpr u32 MAX_FRAMES_IN_FLIGHT = 3; +constexpr auto PIPELINE_CACHE_FILE = "PipelineCacheData.bin"; +constexpr auto MODEL_FILE = "model/DamagedHelmet.glb"; +constexpr auto MODEL_FILE2 = "model/Box.glb"; +constexpr auto BACKDROP_FILE = "image/photo_studio_loft_hall_4k.hdr"; +constexpr u32 INIT_WIDTH = 640; +constexpr u32 
INIT_HEIGHT = 480; + +int +main(int, char *[]) +{ + MIN_LOG_LEVEL(Logger::LogType::eInfo); + + Context context = {"Scene Render [WIP]", VERSION}; + Window window = {"Scene Render [WIP] (Aster)", &context, {INIT_WIDTH, INIT_HEIGHT}}; + + PhysicalDevices physicalDevices = {&window, &context}; + PhysicalDevice deviceToUse = FindSuitableDevice(physicalDevices); + + usize physicalDeviceOffsetAlignment = deviceToUse.m_DeviceProperties.limits.minUniformBufferOffsetAlignment; + + vk::Extent2D internalResolution = {1920, 1080}; + internalResolution.width = (internalResolution.height * INIT_WIDTH) / INIT_HEIGHT; + + CameraController cameraController = {vec3{0.0f, 0.0f, 2.0f}, vec3{0.0f}, 70_deg, + Cast(internalResolution.width) / Cast(internalResolution.height)}; + + INFO("Using {} as the primary device.", deviceToUse.m_DeviceProperties.deviceName.data()); + + Features enabledDeviceFeatures = { + .m_Vulkan10Features = + { + .samplerAnisotropy = true, + .shaderInt64 = true, + }, + .m_Vulkan12Features = + { + .descriptorIndexing = true, + .shaderSampledImageArrayNonUniformIndexing = true, + .shaderStorageBufferArrayNonUniformIndexing = true, + .shaderStorageImageArrayNonUniformIndexing = true, + .descriptorBindingUniformBufferUpdateAfterBind = true, // Not related to Bindless + .descriptorBindingSampledImageUpdateAfterBind = true, + .descriptorBindingStorageImageUpdateAfterBind = true, + .descriptorBindingStorageBufferUpdateAfterBind = true, + .descriptorBindingPartiallyBound = true, + .runtimeDescriptorArray = true, + .bufferDeviceAddress = true, + .bufferDeviceAddressCaptureReplay = true, + }, + .m_Vulkan13Features = + { + .synchronization2 = true, + .dynamicRendering = true, + }, + }; + + auto attachmentFormat = vk::Format::eR8G8B8A8Srgb; + auto pipelineCacheData = ReadFileBytes(PIPELINE_CACHE_FILE, false); + + QueueAllocation queueAllocation = FindAppropriateQueueAllocation(&deviceToUse); + Device device = {&context, &deviceToUse, &enabledDeviceFeatures, + 
{queueAllocation}, pipelineCacheData, "Primary Device"}; + vk::Queue graphicsQueue = device.GetQueue(queueAllocation.m_Family, 0); + Swapchain swapchain = {&window, &device, "Primary Chain"}; + + RenderResourceManager resourceManager = {&device, 1024}; + EcsRegistry registry; + + AssetLoader assetLoader = {&resourceManager, ®istry, graphicsQueue, queueAllocation.m_Family, + queueAllocation.m_Family}; + + Model model = assetLoader.LoadModelToGpu(MODEL_FILE, "Main Model"); + Model model2 = assetLoader.LoadModelToGpu(MODEL_FILE2, "Main Model 2"); + registry.get(model2.m_RootEntity).m_Position.x += 1.0f; + + UniformBuffer ubo; + ubo.Init(&device, sizeof cameraController.m_Camera, "Desc1 UBO"); + ubo.Write(&device, 0, sizeof cameraController.m_Camera, &cameraController.m_Camera); + + Pipeline pipeline = CreatePipeline(&device, attachmentFormat, &resourceManager); + + vk::DescriptorPool descriptorPool; + vk::DescriptorSet perFrameDescriptor; + + { + vk::DescriptorSetLayout descriptorSetLayout = pipeline.m_SetLayouts[1]; + eastl::array poolSizes = { + vk::DescriptorPoolSize{ + .type = vk::DescriptorType::eUniformBuffer, + .descriptorCount = 3, + }, + }; + vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo = { + .maxSets = 1, .poolSizeCount = Cast(poolSizes.size()), .pPoolSizes = poolSizes.data()}; + AbortIfFailed(device.m_Device.createDescriptorPool(&descriptorPoolCreateInfo, nullptr, &descriptorPool)); + + vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = { + .descriptorPool = descriptorPool, + .descriptorSetCount = 1, + .pSetLayouts = &descriptorSetLayout, + }; + AbortIfFailed(device.m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &perFrameDescriptor)); + } + + vk::DescriptorBufferInfo cameraBufferInfo = { + .buffer = ubo.m_Buffer, + .offset = 0, + .range = sizeof(Camera), + }; + eastl::array writeDescriptors = { + vk::WriteDescriptorSet{ + .dstSet = perFrameDescriptor, + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 1, + 
.descriptorType = vk::DescriptorType::eUniformBuffer, + .pBufferInfo = &cameraBufferInfo, + }, + }; + device.m_Device.updateDescriptorSets(Cast(writeDescriptors.size()), writeDescriptors.data(), 0, nullptr); + + // Persistent variables + vk::Viewport viewport = { + .x = 0, + .y = Cast(internalResolution.height), + .width = Cast(internalResolution.width), + .height = -Cast(internalResolution.height), + .minDepth = 0.0, + .maxDepth = 1.0, + }; + + vk::Rect2D scissor = { + .offset = {0, 0}, + .extent = internalResolution, + }; + + vk::ImageSubresourceRange subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }; + + vk::ImageMemoryBarrier2 preRenderBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput, + .dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eColorAttachmentOptimal, + .srcQueueFamilyIndex = queueAllocation.m_Family, + .dstQueueFamilyIndex = queueAllocation.m_Family, + .subresourceRange = subresourceRange, + }; + vk::DependencyInfo preRenderDependencies = { + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &preRenderBarrier, + }; + + vk::ImageMemoryBarrier2 renderToBlitBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput, + .srcAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite, + .dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer, + .dstAccessMask = vk::AccessFlagBits2::eTransferRead, + .oldLayout = vk::ImageLayout::eColorAttachmentOptimal, + .newLayout = vk::ImageLayout::eTransferSrcOptimal, + .srcQueueFamilyIndex = queueAllocation.m_Family, + .dstQueueFamilyIndex = queueAllocation.m_Family, + .subresourceRange = subresourceRange, + }; + vk::ImageMemoryBarrier2 acquireToTransferDstBarrier = { + 
.srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer, + .dstAccessMask = vk::AccessFlagBits2::eTransferWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eTransferDstOptimal, + .srcQueueFamilyIndex = queueAllocation.m_Family, + .dstQueueFamilyIndex = queueAllocation.m_Family, + .subresourceRange = subresourceRange, + }; + eastl::array postRenderBarriers = { + renderToBlitBarrier, + acquireToTransferDstBarrier, + }; + vk::DependencyInfo postRenderDependencies = { + .imageMemoryBarrierCount = Cast(postRenderBarriers.size()), + .pImageMemoryBarriers = postRenderBarriers.data(), + }; + + vk::ImageMemoryBarrier2 transferDstToGuiRenderBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer, + .srcAccessMask = vk::AccessFlagBits2::eTransferWrite, + .dstStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput, + .dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eTransferDstOptimal, + .newLayout = vk::ImageLayout::eColorAttachmentOptimal, + .srcQueueFamilyIndex = queueAllocation.m_Family, + .dstQueueFamilyIndex = queueAllocation.m_Family, + .subresourceRange = subresourceRange, + }; + vk::DependencyInfo preGuiDependencies = { + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &transferDstToGuiRenderBarrier, + }; + + vk::ImageMemoryBarrier2 prePresentBarrier = { + .srcStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput, + .srcAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite, + .dstStageMask = vk::PipelineStageFlagBits2::eBottomOfPipe, + .dstAccessMask = vk::AccessFlagBits2::eNone, + .oldLayout = vk::ImageLayout::eColorAttachmentOptimal, + .newLayout = vk::ImageLayout::ePresentSrcKHR, + .srcQueueFamilyIndex = queueAllocation.m_Family, + .dstQueueFamilyIndex = queueAllocation.m_Family, + .subresourceRange = subresourceRange, + }; + vk::DependencyInfo 
prePresentDependencies = { + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &prePresentBarrier, + }; + + FrameManager frameManager = {&device, queueAllocation.m_Family, MAX_FRAMES_IN_FLIGHT}; + eastl::fixed_vector depthImages(frameManager.m_FramesInFlight); + eastl::fixed_vector attachmentImages(frameManager.m_FramesInFlight); + { + auto depthIter = depthImages.begin(); + auto attachmentIter = attachmentImages.begin(); + for (u32 index = 0; index < frameManager.m_FramesInFlight; ++index) + { + auto name = fmt::format("Depth Frame{}", index); + depthIter->Init(&device, internalResolution, name.c_str()); + + name = fmt::format("Attachment0 Frame{}", index); + attachmentIter->Init(&device, internalResolution, attachmentFormat, name.c_str()); + + ++depthIter; + ++attachmentIter; + } + } + + struct NodeData + { + mat4 m_Transform; + uptr m_VertexPositionPtr; + uptr m_VertexDataPtr; + uptr m_MaterialPtr; + + // TODO: Remove + u32 m_FirstIndex; + u32 m_IndexCount; + }; + + eastl::fixed_vector, MAX_FRAMES_IN_FLIGHT> perFrameNodeData(frameManager.m_FramesInFlight); + eastl::fixed_vector perFrameNodeBuffer(frameManager.m_FramesInFlight); + for (auto &bufferHandle : perFrameNodeBuffer) + { + StorageBuffer buffer; + buffer.Init(&device, sizeof(NodeData) * 100'000, true); + bufferHandle = resourceManager.Commit(&buffer); + } + + swapchain.RegisterResizeCallback( + [&cameraController, &internalResolution, &viewport, &scissor](vk::Extent2D extent) { + cameraController.SetAspectRatio(Cast(extent.width) / Cast(extent.height)); + + internalResolution.width = Cast(Cast(internalResolution.height) * cameraController.m_AspectRatio); + + viewport.y = Cast(internalResolution.height); + viewport.width = Cast(internalResolution.width); + viewport.height = -Cast(internalResolution.height); + scissor.extent = internalResolution; + }); + + auto sortByParentHier = [®istry](Entity a, Entity b) { + const auto parent = registry.try_get>(b); + return parent && parent->m_ParentEntity == a; 
+ }; + registry.sort>(sortByParentHier); + + Time::Init(); + + auto rootNodeUpdateView = registry.view(Without>{}); + + auto nodeWithParentsUpdateView = registry.view, CGlobalTransform>(); + nodeWithParentsUpdateView.use>(); + + auto renderableObjectsGroup = registry.group(); + + resourceManager.Update(); + + while (window.Poll()) + { + Time::Update(); + + auto *rot = ®istry.get(model.m_RootEntity).m_Rotation; + *rot = glm::rotate(*rot, Cast(30_deg * Time::m_Delta), vec3{0.0f, 1.0f, 0.0f}); + + Frame *currentFrame = frameManager.GetNextFrame(&swapchain, &window); + + u32 imageIndex = currentFrame->m_ImageIdx; + vk::Image currentSwapchainImage = swapchain.m_Images[imageIndex]; + vk::ImageView currentSwapchainImageView = swapchain.m_ImageViews[imageIndex]; + vk::CommandBuffer cmd = currentFrame->m_CommandBuffer; + + DepthImage *currentDepthImage = &depthImages[currentFrame->m_FrameIdx]; + AttachmentImage *currentAttachment = &attachmentImages[currentFrame->m_FrameIdx]; + + if (currentAttachment->m_Extent.width != internalResolution.width || + currentAttachment->m_Extent.height != internalResolution.height) + { + auto name = fmt::format("Depth Frame{}", currentFrame->m_FrameIdx); + currentDepthImage->Destroy(&device); + currentDepthImage->Init(&device, internalResolution, name.c_str()); + + name = fmt::format("Attachment0 Frame{}", currentFrame->m_FrameIdx); + currentAttachment->Destroy(&device); + currentAttachment->Init(&device, internalResolution, attachmentFormat, name.c_str()); + } + + vk::ImageView currentDepthImageView = currentDepthImage->m_View; + vk::Image currentImage = currentAttachment->m_Image; + vk::ImageView currentImageView = currentAttachment->m_View; + + preRenderBarrier.image = currentImage; + postRenderBarriers[0].image = currentImage; + postRenderBarriers[1].image = currentSwapchainImage; + transferDstToGuiRenderBarrier.image = currentSwapchainImage; + prePresentBarrier.image = currentSwapchainImage; + + ubo.Write(&device, 0, sizeof 
cameraController.m_Camera, &cameraController.m_Camera); + + for (auto [entity, dynTransform, globalTransform] : rootNodeUpdateView.each()) + { + auto scale = glm::scale(mat4{1.0f}, dynTransform.m_Scale); + auto rotation = glm::toMat4(dynTransform.m_Rotation); + auto translation = glm::translate(mat4{1.0f}, dynTransform.m_Position); + + globalTransform.m_Transform = translation * rotation * scale; + } + + // Has been sorted and ordered by parent. + for (auto [entity, dynTransform, parent, globalTransform] : nodeWithParentsUpdateView.each()) + { + auto scale = glm::scale(mat4{1.0f}, dynTransform.m_Scale); + auto rotation = glm::toMat4(dynTransform.m_Rotation); + auto translation = glm::translate(mat4{1.0f}, dynTransform.m_Position); + + globalTransform.m_Transform = + registry.get(parent.m_ParentEntity).m_Transform * translation * rotation * scale; + } + + usize objectCount = renderableObjectsGroup.size(); + auto *nodeData = &perFrameNodeData[currentFrame->m_FrameIdx]; + nodeData->clear(); + nodeData->reserve(objectCount); + for (auto [entity, globalTransform, mesh, material] : renderableObjectsGroup.each()) + { + nodeData->push_back({ + .m_Transform = globalTransform.m_Transform, + .m_VertexPositionPtr = mesh.m_VertexPositionPtr, + .m_VertexDataPtr = mesh.m_VertexDataPtr, + .m_MaterialPtr = material.m_MaterialPtr, + .m_FirstIndex = mesh.m_FirstIndex, + .m_IndexCount = mesh.m_IndexCount, + }); + } + resourceManager.Write(perFrameNodeBuffer[currentFrame->m_FrameIdx], 0, objectCount * sizeof(NodeData), + nodeData->data()); + + vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit}; + AbortIfFailed(cmd.begin(&beginInfo)); + + cmd.pipelineBarrier2(&preRenderDependencies); + + // Render + eastl::array attachmentInfos = { + vk::RenderingAttachmentInfo{ + .imageView = currentImageView, + .imageLayout = vk::ImageLayout::eColorAttachmentOptimal, + .resolveMode = vk::ResolveModeFlagBits::eNone, + .loadOp = vk::AttachmentLoadOp::eClear, 
+ .storeOp = vk::AttachmentStoreOp::eStore, + .clearValue = vk::ClearColorValue{0.0f, 0.0f, 0.0f, 1.0f}, + }, + }; + + vk::RenderingAttachmentInfo depthAttachment = { + .imageView = currentDepthImageView, + .imageLayout = vk::ImageLayout::eDepthAttachmentOptimal, + .resolveMode = vk::ResolveModeFlagBits::eNone, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eDontCare, + .clearValue = vk::ClearDepthStencilValue{.depth = 1.0f, .stencil = 0}, + }; + + vk::RenderingInfo renderingInfo = { + .renderArea = {.extent = ToExtent2D(currentAttachment->m_Extent)}, + .layerCount = 1, + .colorAttachmentCount = Cast(attachmentInfos.size()), + .pColorAttachments = attachmentInfos.data(), + .pDepthAttachment = &depthAttachment, + }; + + cmd.beginRendering(&renderingInfo); + + cmd.setViewport(0, 1, &viewport); + cmd.setScissor(0, 1, &scissor); + cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1, + &resourceManager.m_DescriptorSet, 0, nullptr); + + cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 1, 1, &perFrameDescriptor, 0, + nullptr); + + //TODO("Unify index buffers"); + cmd.bindIndexBuffer(resourceManager.GetIndexBuffer(), 0, vk::IndexType::eUint32); + + cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline); + + // TODO("Get the data to the GPU"); + // auto nodeHandle = perFrameNodeBuffer[currentFrame->m_FrameIdx]; + auto &nodeBuffer = perFrameNodeData[currentFrame->m_FrameIdx]; + for (auto &node : nodeBuffer) + { + cmd.pushConstants(pipeline.m_Layout, vk::ShaderStageFlagBits::eAll, 0, sizeof node, &node); + cmd.drawIndexed(node.m_IndexCount, 1, node.m_FirstIndex, 0, 0); + } + + cmd.endRendering(); + + cmd.pipelineBarrier2(&postRenderDependencies); + + vk::ImageBlit blitRegion = { + .srcSubresource = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1, + }, + .srcOffsets = + std::array{ + vk::Offset3D{0, 0, 0}, + 
ToOffset3D(currentAttachment->m_Extent), + }, + .dstSubresource = + { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1, + }, + .dstOffsets = + std::array{ + vk::Offset3D{0, 0, 0}, + vk::Offset3D{Cast(swapchain.m_Extent.width), Cast(swapchain.m_Extent.height), 1}, + }, + }; + cmd.blitImage(currentImage, postRenderBarriers[0].newLayout, currentSwapchainImage, + postRenderBarriers[1].newLayout, 1, &blitRegion, vk::Filter::eLinear); + + cmd.pipelineBarrier2(&preGuiDependencies); + + cmd.pipelineBarrier2(&prePresentDependencies); + + AbortIfFailed(cmd.end()); + + vk::PipelineStageFlags waitDstStage = vk::PipelineStageFlagBits::eColorAttachmentOutput; + vk::SubmitInfo submitInfo = { + .waitSemaphoreCount = 1, + .pWaitSemaphores = ¤tFrame->m_ImageAcquireSem, + .pWaitDstStageMask = &waitDstStage, + .commandBufferCount = 1, + .pCommandBuffers = &cmd, + .signalSemaphoreCount = 1, + .pSignalSemaphores = ¤tFrame->m_RenderFinishSem, + }; + AbortIfFailed(graphicsQueue.submit(1, &submitInfo, currentFrame->m_FrameAvailableFence)); + + currentFrame->Present(graphicsQueue, &swapchain, &window); + } + + device.WaitIdle(); + + for (auto bufferHandle : perFrameNodeBuffer) + { + resourceManager.Release(bufferHandle); + } + for (auto depthImage : depthImages) + { + depthImage.Destroy(&device); + } + for (auto attachmentImage : attachmentImages) + { + attachmentImage.Destroy(&device); + } + ubo.Destroy(&device); + + device.m_Device.destroy(descriptorPool, nullptr); + + model.Destroy(&resourceManager, ®istry); + model2.Destroy(&resourceManager, ®istry); +} \ No newline at end of file diff --git a/samples/04_scenes/model/AlphaBlendModeTest.glb b/samples/04_scenes/model/AlphaBlendModeTest.glb new file mode 100644 index 0000000..3c7e82a --- /dev/null +++ b/samples/04_scenes/model/AlphaBlendModeTest.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:688786ffa64990071cfd93c96f29917fddd3d254d3f0e48039d80d3b5ac0d8c7 +size 3017136 diff --git a/samples/04_scenes/model/Box.glb b/samples/04_scenes/model/Box.glb new file mode 100644 index 0000000..5445f06 --- /dev/null +++ b/samples/04_scenes/model/Box.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed52f7192b8311d700ac0ce80644e3852cd01537e4d62241b9acba023da3d54e +size 1664 diff --git a/samples/04_scenes/model/DamagedHelmet.glb b/samples/04_scenes/model/DamagedHelmet.glb new file mode 100644 index 0000000..a8c16b5 --- /dev/null +++ b/samples/04_scenes/model/DamagedHelmet.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1e3b04de97b11de564ce6e53b95f02954a297f0008183ac63a4f5974f6b32d8 +size 3773916 diff --git a/samples/04_scenes/model/MarbleBust/marble_bust_01.bin b/samples/04_scenes/model/MarbleBust/marble_bust_01.bin new file mode 100644 index 0000000..16abb3b --- /dev/null +++ b/samples/04_scenes/model/MarbleBust/marble_bust_01.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bb757c1bd00bfe42005c84e7f76df98ce04a532e595a2ddda8a0335cd959e33 +size 416608 diff --git a/samples/04_scenes/model/MarbleBust/marble_bust_01_4k.gltf b/samples/04_scenes/model/MarbleBust/marble_bust_01_4k.gltf new file mode 100644 index 0000000..bec0067 --- /dev/null +++ b/samples/04_scenes/model/MarbleBust/marble_bust_01_4k.gltf @@ -0,0 +1,161 @@ +{ + "asset": { + "generator": "Khronos glTF Blender I/O v1.6.16", + "version": "2.0" + }, + "scene": 0, + "scenes": [ + { + "name": "Scene", + "nodes": [ + 0 + ] + } + ], + "nodes": [ + { + "mesh": 0, + "name": "marble_bust_01", + "translation": [ + 0, + 0.028335653245449066, + 0 + ] + } + ], + "materials": [ + { + "doubleSided": true, + "name": "marble_bust_01", + "normalTexture": { + "index": 0 + }, + "pbrMetallicRoughness": { + "baseColorTexture": { + "index": 1 + }, + "metallicFactor": 0, + "metallicRoughnessTexture": { + "index": 2 + } + } + } + ], + "meshes": 
[ + { + "name": "marble_bust_01", + "primitives": [ + { + "attributes": { + "POSITION": 0, + "NORMAL": 1, + "TEXCOORD_0": 2 + }, + "indices": 3, + "material": 0 + } + ] + } + ], + "textures": [ + { + "sampler": 0, + "source": 0 + }, + { + "sampler": 0, + "source": 1 + }, + { + "sampler": 0, + "source": 2 + } + ], + "images": [ + { + "mimeType": "image/jpeg", + "name": "marble_bust_01_nor_gl", + "uri": "textures/marble_bust_01_nor_gl_4k.jpg" + }, + { + "mimeType": "image/jpeg", + "name": "marble_bust_01_diff", + "uri": "textures/marble_bust_01_diff_4k.jpg" + }, + { + "mimeType": "image/jpeg", + "name": "marble_bust_01_arm", + "uri": "textures/marble_bust_01_rough_4k.jpg" + } + ], + "accessors": [ + { + "bufferView": 0, + "componentType": 5126, + "count": 9746, + "max": [ + 0.14886942505836487, + 0.48668384552001953, + 0.1551172435283661 + ], + "min": [ + -0.12288019061088562, + -0.028259359300136566, + -0.1445964276790619 + ], + "type": "VEC3" + }, + { + "bufferView": 1, + "componentType": 5126, + "count": 9746, + "type": "VEC3" + }, + { + "bufferView": 2, + "componentType": 5126, + "count": 9746, + "type": "VEC2" + }, + { + "bufferView": 3, + "componentType": 5123, + "count": 52368, + "type": "SCALAR" + } + ], + "bufferViews": [ + { + "buffer": 0, + "byteLength": 116952, + "byteOffset": 0 + }, + { + "buffer": 0, + "byteLength": 116952, + "byteOffset": 116952 + }, + { + "buffer": 0, + "byteLength": 77968, + "byteOffset": 233904 + }, + { + "buffer": 0, + "byteLength": 104736, + "byteOffset": 311872 + } + ], + "samplers": [ + { + "magFilter": 9729, + "minFilter": 9987 + } + ], + "buffers": [ + { + "byteLength": 416608, + "uri": "marble_bust_01.bin" + } + ] +} \ No newline at end of file diff --git a/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_diff_4k.jpg b/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_diff_4k.jpg new file mode 100644 index 0000000..30d4791 --- /dev/null +++ 
b/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_diff_4k.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:986a5a956453e3a4e84ca58e16a408c72e423bf8233c4d2a395c59fd94358c83 +size 1671267 diff --git a/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_nor_gl_4k.jpg b/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_nor_gl_4k.jpg new file mode 100644 index 0000000..d9e6693 --- /dev/null +++ b/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_nor_gl_4k.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61645b8878ea6d04bd95379bdffb1a7b07ef955dfba7c7ef3a60f6927473aef4 +size 1541349 diff --git a/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_rough_4k.jpg b/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_rough_4k.jpg new file mode 100644 index 0000000..5e44993 --- /dev/null +++ b/samples/04_scenes/model/MarbleBust/textures/marble_bust_01_rough_4k.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3994d1690406dc076581e636accced3a03c061536984e21fa8818ef5dda17c00 +size 2156013 diff --git a/samples/04_scenes/model/OrientationTest.glb b/samples/04_scenes/model/OrientationTest.glb new file mode 100644 index 0000000..9713071 --- /dev/null +++ b/samples/04_scenes/model/OrientationTest.glb @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a91cf448f37de06ab61bc615e123692dd29ac185e6c69e1fdbf1cf53e41045b2 +size 38920 diff --git a/samples/04_scenes/pipeline_utils.cpp b/samples/04_scenes/pipeline_utils.cpp new file mode 100644 index 0000000..50812b3 --- /dev/null +++ b/samples/04_scenes/pipeline_utils.cpp @@ -0,0 +1,189 @@ +// ============================================= +// Aster: pipeline_utils.cpp +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#include "pipeline_utils.h" + +#include "device.h" +#include "render_resource_manager.h" +#include "helpers.h" + +#include + +Pipeline 
+CreatePipeline(const Device *device, vk::Format attachmentFormat, const RenderResourceManager *resourceManager) +{ + // Pipeline Setup + auto vertexShaderModule = CreateShader(device, VERTEX_SHADER_FILE); + auto fragmentShaderModule = CreateShader(device, FRAGMENT_SHADER_FILE); + + eastl::array shaderStages = {{ + { + .stage = vk::ShaderStageFlagBits::eVertex, + .module = vertexShaderModule, + .pName = "main", + }, + { + .stage = vk::ShaderStageFlagBits::eFragment, + .module = fragmentShaderModule, + .pName = "main", + }, + }}; + + eastl::vector descriptorSetLayouts; + + descriptorSetLayouts.push_back(resourceManager->m_SetLayout); + + { + eastl::array descriptorSetLayoutBindings = { + vk::DescriptorSetLayoutBinding{ + .binding = 0, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .descriptorCount = 1, + .stageFlags = vk::ShaderStageFlagBits::eAll, + }, + vk::DescriptorSetLayoutBinding{ + .binding = 1, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .descriptorCount = 1, + .stageFlags = vk::ShaderStageFlagBits::eAll, + }, + vk::DescriptorSetLayoutBinding{ + .binding = 2, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .descriptorCount = 1, + .stageFlags = vk::ShaderStageFlagBits::eAll, + }, + }; + vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = { + .bindingCount = Cast(descriptorSetLayoutBindings.size()), + .pBindings = descriptorSetLayoutBindings.data(), + }; + vk::DescriptorSetLayout descriptorSetLayout; + AbortIfFailed( + device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout)); + descriptorSetLayouts.push_back(descriptorSetLayout); + } + + vk::PushConstantRange pushConstantRange = { + .stageFlags = vk::ShaderStageFlagBits::eAll, + .offset = 0, + .size = 96, + }; + + vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo = { + .setLayoutCount = Cast(descriptorSetLayouts.size()), + .pSetLayouts = descriptorSetLayouts.data(), + .pushConstantRangeCount = 1, + 
.pPushConstantRanges = &pushConstantRange, + }; + vk::PipelineLayout pipelineLayout; + AbortIfFailed(device->m_Device.createPipelineLayout(&pipelineLayoutCreateInfo, nullptr, &pipelineLayout)); + device->SetName(pipelineLayout, "Box Layout"); + + descriptorSetLayouts[0] = nullptr; // Not owned. + + vk::PipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {}; + vk::PipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = { + .topology = vk::PrimitiveTopology::eTriangleList, + .primitiveRestartEnable = false, + }; + + vk::PipelineViewportStateCreateInfo viewportStateCreateInfo = { + .viewportCount = 1, + .scissorCount = 1, + }; + + vk::PipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = { + .depthClampEnable = false, + .rasterizerDiscardEnable = false, + .polygonMode = vk::PolygonMode::eFill, + .cullMode = vk::CullModeFlagBits::eBack, + .frontFace = vk::FrontFace::eCounterClockwise, + .depthBiasEnable = false, + .lineWidth = 1.0, + }; + vk::PipelineMultisampleStateCreateInfo multisampleStateCreateInfo = { + .rasterizationSamples = vk::SampleCountFlagBits::e1, + .sampleShadingEnable = false, + }; + vk::PipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo = { + .depthTestEnable = true, + .depthWriteEnable = true, + .depthCompareOp = vk::CompareOp::eLess, + }; + vk::PipelineColorBlendAttachmentState colorBlendAttachmentState = { + .blendEnable = false, + .srcColorBlendFactor = vk::BlendFactor::eSrcColor, + .dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcColor, + .colorBlendOp = vk::BlendOp::eAdd, + .srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha, + .dstAlphaBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha, + .alphaBlendOp = vk::BlendOp::eAdd, + .colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | + vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA, + }; + vk::PipelineColorBlendStateCreateInfo colorBlendStateCreateInfo = { + .logicOpEnable = false, + .attachmentCount = 1, 
+ .pAttachments = &colorBlendAttachmentState, + }; + + eastl::array dynamicStates = { + vk::DynamicState::eScissor, + vk::DynamicState::eViewport, + }; + + vk::PipelineDynamicStateCreateInfo dynamicStateCreateInfo = { + .dynamicStateCount = Cast(dynamicStates.size()), + .pDynamicStates = dynamicStates.data(), + }; + + vk::PipelineRenderingCreateInfo renderingCreateInfo = { + .viewMask = 0, + .colorAttachmentCount = 1, + .pColorAttachmentFormats = &attachmentFormat, + .depthAttachmentFormat = vk::Format::eD24UnormS8Uint, + }; + + vk::GraphicsPipelineCreateInfo pipelineCreateInfo = { + .pNext = &renderingCreateInfo, + .stageCount = Cast(shaderStages.size()), + .pStages = shaderStages.data(), + .pVertexInputState = &vertexInputStateCreateInfo, + .pInputAssemblyState = &inputAssemblyStateCreateInfo, + .pViewportState = &viewportStateCreateInfo, + .pRasterizationState = &rasterizationStateCreateInfo, + .pMultisampleState = &multisampleStateCreateInfo, + .pDepthStencilState = &depthStencilStateCreateInfo, + .pColorBlendState = &colorBlendStateCreateInfo, + .pDynamicState = &dynamicStateCreateInfo, + .layout = pipelineLayout, + }; + vk::Pipeline pipeline; + AbortIfFailed( + device->m_Device.createGraphicsPipelines(device->m_PipelineCache, 1, &pipelineCreateInfo, nullptr, &pipeline)); + device->SetName(pipeline, "Box Pipeline"); + + device->m_Device.destroy(vertexShaderModule, nullptr); + device->m_Device.destroy(fragmentShaderModule, nullptr); + + return {device, pipelineLayout, pipeline, std::move(descriptorSetLayouts)}; +} + +vk::ShaderModule +CreateShader(const Device *device, cstr shaderFile) +{ + eastl::vector shaderCode = ReadFile(shaderFile); + + const vk::ShaderModuleCreateInfo shaderModuleCreateInfo = { + .codeSize = shaderCode.size() * sizeof(u32), + .pCode = shaderCode.data(), + }; + vk::ShaderModule shaderModule; + + AbortIfFailedMV(device->m_Device.createShaderModule(&shaderModuleCreateInfo, nullptr, &shaderModule), + "Shader {} could not be created.", 
shaderFile); + return shaderModule; +} diff --git a/samples/04_scenes/pipeline_utils.h b/samples/04_scenes/pipeline_utils.h new file mode 100644 index 0000000..01ac264 --- /dev/null +++ b/samples/04_scenes/pipeline_utils.h @@ -0,0 +1,20 @@ +// ============================================= +// Aster: pipeline_utils.h +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#pragma once + +#include "global.h" +#include "pipeline.h" + +struct RenderResourceManager; +struct Swapchain; +struct Device; + +constexpr auto VERTEX_SHADER_FILE = "shader/model.vert.glsl.spv"; +constexpr auto FRAGMENT_SHADER_FILE = "shader/model.frag.glsl.spv"; + +vk::ShaderModule CreateShader(const Device *device, cstr shaderFile); +Pipeline +CreatePipeline(const Device *device, vk::Format attachmentFormat, const RenderResourceManager *resourceManager); diff --git a/samples/04_scenes/render_resource_manager.cpp b/samples/04_scenes/render_resource_manager.cpp new file mode 100644 index 0000000..aa3bc41 --- /dev/null +++ b/samples/04_scenes/render_resource_manager.cpp @@ -0,0 +1,943 @@ +// ============================================= +// Aster: render_resource_manager.cpp +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#include "render_resource_manager.h" + +#include "buffer.h" +#include "device.h" +#include "helpers.h" +#include "image.h" + +#include + +void +TextureManager::Init(const u32 maxCapacity) +{ + m_MaxCapacity = maxCapacity; + m_FreeHead = GpuResourceHandle::INVALID_HANDLE; +} + +TextureHandle +TextureManager::Commit(Texture *texture) +{ + ERROR_IF(!texture || !texture->IsValid(), "Texture must be valid for commital") + THEN_ABORT(-1); + + if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE) + { + const u32 index = m_FreeHead; + + Texture *allocatedTexture = &m_Textures[index]; + + assert(!allocatedTexture->IsValid()); + m_FreeHead = *Recast(allocatedTexture); + + // Ensure it is copyable. 
+ static_assert(std::is_trivially_copyable_v); + *allocatedTexture = *texture; + + // Take ownership of the texture. + texture->m_Flags_ &= ~Texture::OWNED_BIT; + + return {index}; + } + + const u32 index = Cast(m_Textures.size()); + if (index < m_MaxCapacity) + { + Texture *allocatedTexture = &m_Textures.push_back(); + + // Ensure it is copyable. + static_assert(std::is_trivially_copyable_v); + *allocatedTexture = *texture; + + texture->m_Flags_ &= ~Texture::OWNED_BIT; + + return {index}; + } + + ERROR("Out of Buffers") THEN_ABORT(-1); +} + +Texture * +TextureManager::Fetch(const TextureHandle handle) +{ + assert(!handle.IsInvalid()); + + return &m_Textures[handle.m_Index]; +} + +void +TextureManager::Release(const Device *device, const TextureHandle handle) +{ + assert(!handle.IsInvalid()); + + Texture *allocatedTexture = &m_Textures[handle.m_Index]; + allocatedTexture->Destroy(device); + + assert(!allocatedTexture->IsValid()); + *Recast(allocatedTexture) = m_FreeHead; + + m_FreeHead = handle.m_Index; +} + +void +TextureManager::Destroy(const Device *device) +{ + for (auto &texture : m_Textures) + { + texture.Destroy(device); + } +} + +void +BufferManager::Init(const u32 maxCapacity) +{ + m_MaxCapacity = maxCapacity; + m_FreeHead = GpuResourceHandle::INVALID_HANDLE; +} + +BufferHandle +BufferManager::Commit(StorageBuffer *buffer) +{ + ERROR_IF(!buffer || !buffer->IsValid() || !buffer->IsOwned(), "Buffer must be valid and owned for commital") + THEN_ABORT(-1); + + if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE) + { + const u32 index = m_FreeHead; + + StorageBuffer *allocatedBuffer = &m_Buffers[index]; + + assert(!allocatedBuffer->IsValid()); + m_FreeHead = *Recast(allocatedBuffer); + + // Ensure it is copyable. + static_assert(std::is_trivially_copyable_v); + *allocatedBuffer = *buffer; + + // Take ownership of the buffer. 
+ buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT; + + return {index}; + } + + const u32 index = Cast(m_Buffers.size()); + if (index < m_MaxCapacity) + { + StorageBuffer *allocatedBuffer = &m_Buffers.push_back(); + + // Ensure it is copyable. + static_assert(std::is_trivially_copyable_v); + *allocatedBuffer = *buffer; + + buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT; + + return {index}; + } + + ERROR("Out of Buffers") THEN_ABORT(-1); +} + +StorageBuffer * +BufferManager::Fetch(const BufferHandle handle) +{ + assert(!handle.IsInvalid()); + + return &m_Buffers[handle.m_Index]; +} + +void +BufferManager::Release(const Device *device, const BufferHandle handle) +{ + assert(!handle.IsInvalid()); + + StorageBuffer *allocatedBuffer = &m_Buffers[handle.m_Index]; + allocatedBuffer->Destroy(device); + + assert(!allocatedBuffer->IsValid()); + *Recast(allocatedBuffer) = m_FreeHead; + + m_FreeHead = handle.m_Index; +} + +void +BufferManager::Destroy(const Device *device) +{ + for (auto &buffer : m_Buffers) + { + buffer.Destroy(device); + } +} + +StorageTextureHandle +StorageTextureManager::Commit(StorageTexture *texture) +{ + const TextureHandle tx = TextureManager::Commit(texture); + return {tx.m_Index}; +} + +StorageTexture * +StorageTextureManager::Fetch(const StorageTextureHandle handle) +{ + assert(!handle.IsInvalid()); + + return Recast(&m_Textures[handle.m_Index]); +} + +void +StorageTextureManager::Release(const Device *device, const StorageTextureHandle handle) +{ + TextureManager::Release(device, {handle.m_Index}); +} + +usize +HashSamplerCreateInfo(const vk::SamplerCreateInfo *createInfo) +{ + usize hash = HashAny(createInfo->flags); + hash = HashCombine(hash, HashAny(createInfo->magFilter)); + hash = HashCombine(hash, HashAny(createInfo->minFilter)); + hash = HashCombine(hash, HashAny(createInfo->mipmapMode)); + hash = HashCombine(hash, HashAny(createInfo->addressModeU)); + hash = HashCombine(hash, HashAny(createInfo->addressModeV)); + hash = HashCombine(hash, 
HashAny(createInfo->addressModeW)); + hash = HashCombine(hash, HashAny(Cast(createInfo->mipLodBias * 1000))); // Resolution of 10^-3 + hash = HashCombine(hash, HashAny(createInfo->anisotropyEnable)); + hash = HashCombine(hash, + HashAny(Cast(createInfo->maxAnisotropy * 0x10))); // 16:1 Anisotropy is enough resolution + hash = HashCombine(hash, HashAny(createInfo->compareEnable)); + hash = HashCombine(hash, HashAny(createInfo->compareOp)); + hash = HashCombine(hash, HashAny(Cast(createInfo->minLod * 1000))); // 0.001 resolution is enough. + hash = HashCombine(hash, + HashAny(Cast(createInfo->maxLod * 1000))); // 0.001 resolution is enough. (1 == NO Clamp) + hash = HashCombine(hash, HashAny(createInfo->borderColor)); + hash = HashCombine(hash, HashAny(createInfo->unnormalizedCoordinates)); + + return hash; +} + +void +SamplerManager::Init(usize size) +{ + m_Samplers.reserve(size); + m_SamplerHashes.reserve(size); +} + +SamplerHandle +SamplerManager::Create(const Device *device, const vk::SamplerCreateInfo *createInfo) +{ + const usize hash = HashSamplerCreateInfo(createInfo); + + for (u32 index = 0; usize samplerHash : m_SamplerHashes) + { + if (samplerHash == hash) + { + return {index}; + } + ++index; + } + + vk::Sampler sampler; + AbortIfFailed(device->m_Device.createSampler(createInfo, nullptr, &sampler)); + const u32 index = Cast(m_SamplerHashes.size()); + m_SamplerHashes.push_back(hash); + m_Samplers.push_back(sampler); + return {index}; +} + +vk::Sampler +SamplerManager::Fetch(const SamplerHandle handle) +{ + assert(!handle.IsInvalid()); + + return m_Samplers[handle.m_Index]; +} + +void +SamplerManager::Destroy(const Device *device) +{ + for (const auto &sampler : m_Samplers) + { + device->m_Device.destroy(sampler, nullptr); + } + m_Samplers.clear(); + m_SamplerHashes.clear(); +} + +void +VirtualizedBufferPool::InitStorage(const Device *device, usize bufferMaxSize) +{ + auto buffer = std::make_unique(); + buffer->Init(device, bufferMaxSize, true, true, "Unified 
Geometry Buffer"); + m_BackingBuffer = std::move(buffer); + + vk::BufferDeviceAddressInfo addressInfo = { + .buffer = m_BackingBuffer->m_Buffer, + }; + m_BufferPtr = device->m_Device.getBufferAddress(&addressInfo); + + const VmaVirtualBlockCreateInfo virtualBlockCreateInfo = { + .size = bufferMaxSize, + }; + AbortIfFailed(Cast(vmaCreateVirtualBlock(&virtualBlockCreateInfo, &m_Block))); +} + +void +VirtualizedBufferPool::InitIndex(const Device *device, usize bufferMaxSize) +{ + auto buffer = std::make_unique(); + buffer->Init(device, bufferMaxSize, true, true, "Unified Index Buffer"); + m_BackingBuffer = std::move(buffer); + + vk::BufferDeviceAddressInfo addressInfo = { + .buffer = m_BackingBuffer->m_Buffer, + }; + m_BufferPtr = device->m_Device.getBufferAddress(&addressInfo); + + const VmaVirtualBlockCreateInfo virtualBlockCreateInfo = { + .size = bufferMaxSize, + }; + AbortIfFailed(Cast(vmaCreateVirtualBlock(&virtualBlockCreateInfo, &m_Block))); +} + +void +VirtualizedBufferPool::UpdateToGpu(const Device *device) +{ + // Unrequired until adding the non-ReBAR support. +} + +VirtualizedBufferHandle +VirtualizedBufferPool::Create(usize size, usize alignment) +{ + const VmaVirtualAllocationCreateInfo virtualAllocationCreateInfo = { + .size = size, + .alignment = alignment, + }; + VmaVirtualAllocation allocation; + usize offset; + AbortIfFailed(vmaVirtualAllocate(m_Block, &virtualAllocationCreateInfo, &allocation, &offset)); + const VirtualBuffer virtualBuffer = { + .m_Allocation = allocation, + .m_Offset = offset, + .m_Size = size, + }; + + u32 index; + VirtualBuffer *allocVBuf; + if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE) + { + index = m_FreeHead; + allocVBuf = &m_VirtualBuffers[index]; + + m_FreeHead = *Recast(allocVBuf); + } + else + { + index = Cast(m_VirtualBuffers.size()); + allocVBuf = &m_VirtualBuffers.push_back(); + } + + // Ensure it is copyable. 
+ static_assert(std::is_trivially_copyable_v); + *allocVBuf = virtualBuffer; + m_Dirty = true; + + return {index}; +} + +uptr +VirtualizedBufferPool::FetchOffset(VirtualizedBufferHandle handle) +{ + assert(!handle.IsInvalid()); + return m_VirtualBuffers[handle.m_Index].m_Offset; +} + +void +VirtualizedBufferPool::Release(VirtualizedBufferHandle handle) +{ + assert(!handle.IsInvalid()); + + VirtualBuffer *virtualBuffer = &m_VirtualBuffers[handle.m_Index]; + vmaVirtualFree(m_Block, virtualBuffer->m_Allocation); + + *Recast(virtualBuffer) = m_FreeHead; + + m_FreeHead = handle.m_Index; +} + +void +VirtualizedBufferPool::Write(VirtualizedBufferHandle handle, usize offset, usize size, const void *data) +{ + if (handle.IsInvalid()) + return; + + assert(m_BackingBuffer->IsMapped() && "Non ReBAR not supported."); + + const VirtualBuffer *virtualBuffer = &m_VirtualBuffers[handle.m_Index]; + assert(offset + size <= virtualBuffer->m_Size); + + u8 *target = m_BackingBuffer->m_Mapped + virtualBuffer->m_Offset + offset; + memcpy(target, data, size); +} + +void +VirtualizedBufferPool::Destroy(const Device *device) +{ + m_BackingBuffer->Destroy(device); + m_BackingBuffer.reset(); +} + +RenderResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info) + : uBufferInfo(info) +{ +} + +RenderResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info) + : uImageInfo(info) +{ +} + +RenderResourceManager::WriteInfo::WriteInfo(vk::BufferView info) + : uBufferView(info) +{ +} + +BufferHandle +RenderResourceManager::Commit(StorageBuffer *storageBuffer) +{ + const BufferHandle handle = m_BufferManager.Commit(storageBuffer); + + m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{ + .buffer = storageBuffer->m_Buffer, + .offset = 0, + .range = storageBuffer->GetSize(), + }); + + m_Writes.push_back({ + .dstSet = m_DescriptorSet, + .dstBinding = BUFFER_BINDING_INDEX, + .dstArrayElement = handle.m_Index, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eStorageBuffer, 
+ .pBufferInfo = &m_WriteInfos.back().uBufferInfo, + }); + + m_WriteOwner.emplace_back(HandleType::eBuffer, handle.m_Index); + +#if !defined(ASTER_NDEBUG) + ++m_CommitedBufferCount; +#endif + + return handle; +} + +void +RenderResourceManager::Write(const BufferHandle handle, const usize offset, const usize size, const void *data) +{ + m_BufferManager.Fetch(handle)->Write(m_Device, offset, size, data); +} + +void +RenderResourceManager::EraseWrites(u32 handleIndex, HandleType handleType) +{ + auto writeIter = m_Writes.begin(); + auto ownerIter = m_WriteOwner.begin(); + const auto ownerEnd = m_WriteOwner.end(); + + while (ownerIter != ownerEnd) + { + if (ownerIter->first == handleType && ownerIter->second == handleIndex) + { + *writeIter = m_Writes.back(); + *ownerIter = m_WriteOwner.back(); + m_Writes.pop_back(); + m_WriteOwner.pop_back(); + return; + } + + ++ownerIter; + ++writeIter; + } +} + +void +RenderResourceManager::Release(BufferHandle handle) +{ + if (handle.IsInvalid()) + return; + + EraseWrites(handle.m_Index, HandleType::eBuffer); + + m_BufferManager.Release(m_Device, handle); + +#if !defined(ASTER_NDEBUG) + --m_CommitedBufferCount; +#endif +} + +void +RenderResourceManager::Release(StorageBuffer *storageBuffer, const BufferHandle handle) +{ + assert(storageBuffer); + assert(!storageBuffer->IsValid()); + + StorageBuffer *internal = m_BufferManager.Fetch(handle); + *storageBuffer = *internal; + internal->m_Size_ &= ~StorageBuffer::OWNED_BIT; + + Release(handle); +} + +void +RenderResourceManager::Release(TextureHandle handle) +{ + if (handle.IsInvalid()) + return; + + EraseWrites(handle.m_Index, HandleType::eTexture); + + m_TextureManager.Release(m_Device, handle); + +#if !defined(ASTER_NDEBUG) + --m_CommitedTextureCount; +#endif +} + +void +RenderResourceManager::Release(Texture *texture, TextureHandle handle) +{ + assert(texture); + assert(!texture->IsValid()); + + Texture *internal = m_TextureManager.Fetch(handle); + *texture = *internal; + 
internal->m_Flags_ &= ~Texture::OWNED_BIT; + + Release(handle); +} + +TextureHandle +RenderResourceManager::CommitTexture(Texture *texture, const SamplerHandle sampler) +{ + TextureHandle handle = m_TextureManager.Commit(texture); + + const vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler); + + m_WriteInfos.emplace_back(vk::DescriptorImageInfo{ + .sampler = samplerImpl, + .imageView = texture->m_View, + .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal, + }); + + m_Writes.push_back({ + .dstSet = m_DescriptorSet, + .dstBinding = TEXTURE_BINDING_INDEX, + .dstArrayElement = handle.m_Index, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .pImageInfo = &m_WriteInfos.back().uImageInfo, + }); + + m_WriteOwner.emplace_back(HandleType::eTexture, handle.m_Index); + +#if !defined(ASTER_NDEBUG) + ++m_CommitedTextureCount; +#endif + + return {handle}; +} + +StorageTextureHandle +RenderResourceManager::CommitStorageTexture(StorageTexture *storageTexture, SamplerHandle sampler) +{ + StorageTextureHandle handle = m_StorageTextureManager.Commit(storageTexture); + + vk::Sampler samplerImpl = sampler.IsInvalid() ? 
m_DefaultSampler : m_SamplerManager.Fetch(sampler); + + m_WriteInfos.emplace_back(vk::DescriptorImageInfo{ + .sampler = samplerImpl, + .imageView = storageTexture->m_View, + .imageLayout = vk::ImageLayout::eGeneral, + }); + + m_Writes.push_back({ + .dstSet = m_DescriptorSet, + .dstBinding = STORAGE_TEXTURE_BINDING_INDEX, + .dstArrayElement = handle.m_Index, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eStorageImage, + .pImageInfo = &m_WriteInfos.back().uImageInfo, + }); + + m_WriteOwner.emplace_back(HandleType::eStorageTexture, handle.m_Index); + +#if !defined(ASTER_NDEBUG) + ++m_CommitedStorageTextureCount; +#endif + + return {handle}; +} + +void +RenderResourceManager::Release(StorageTextureHandle handle) +{ + if (handle.IsInvalid()) + return; + + EraseWrites(handle.m_Index, HandleType::eTexture); + + m_StorageTextureManager.Release(m_Device, handle); + +#if !defined(ASTER_NDEBUG) + --m_CommitedStorageTextureCount; +#endif +} + +void +RenderResourceManager::Release(StorageTexture *texture, const StorageTextureHandle handle) +{ + assert(texture); + assert(!texture->IsValid()); + + StorageTexture *internal = m_StorageTextureManager.Fetch(handle); + *texture = *internal; + internal->m_Flags_ &= ~StorageTexture::OWNED_BIT; + + Release(handle); +} + +void +RenderResourceManager::Update() +{ + // Descriptor Updates + if (!m_Writes.empty()) + { + m_Device->m_Device.updateDescriptorSets(Cast(m_Writes.size()), m_Writes.data(), 0, nullptr); + + m_Writes.clear(); + m_WriteInfos.clear(); + m_WriteOwner.clear(); + } + + // Sub-system updates + m_Geometry.UpdateToGpu(m_Device); +} + +RenderResourceManager::RenderResourceManager(Device *device, u16 maxSize, bool useBufferAddress) + : m_Device(device) + , m_UseBufferAddr(useBufferAddress) +{ + vk::PhysicalDeviceProperties properties; + m_Device->m_PhysicalDevice.getProperties(&properties); + + u32 buffersCount = eastl::min(properties.limits.maxPerStageDescriptorStorageBuffers - 1024, Cast(maxSize)); + u32 
texturesCount = eastl::min(properties.limits.maxPerStageDescriptorSampledImages - 1024, Cast(maxSize)); + u32 storageTexturesCount = + eastl::min(properties.limits.maxPerStageDescriptorStorageImages - 1024, Cast(maxSize)); + + INFO("Max Buffer Count: {}", buffersCount); + INFO("Max Texture Count: {}", texturesCount); + INFO("Max Storage Texture Count: {}", storageTexturesCount); + + m_Geometry.InitStorage(device, Megabyte(128u)); + m_Index.InitIndex(device, Megabyte(8u)); + m_Material.InitStorage(device, Kilobyte(560u)); + m_BufferManager.Init(buffersCount); + m_TextureManager.Init(texturesCount); + m_StorageTextureManager.Init(storageTexturesCount); + m_SamplerManager.Init(storageTexturesCount); + + m_DefaultSamplerCreateInfo = { + .magFilter = vk::Filter::eLinear, + .minFilter = vk::Filter::eLinear, + .mipmapMode = vk::SamplerMipmapMode::eLinear, + .addressModeU = vk::SamplerAddressMode::eRepeat, + .addressModeV = vk::SamplerAddressMode::eRepeat, + .addressModeW = vk::SamplerAddressMode::eRepeat, + .mipLodBias = 0.0f, + .anisotropyEnable = true, + .maxAnisotropy = properties.limits.maxSamplerAnisotropy, + .compareEnable = false, + .minLod = 0, + .maxLod = VK_LOD_CLAMP_NONE, + .borderColor = vk::BorderColor::eFloatOpaqueBlack, + .unnormalizedCoordinates = false, + }; + + m_DefaultSampler = m_SamplerManager.Fetch(m_SamplerManager.Create(device, &m_DefaultSamplerCreateInfo)); + + eastl::array poolSizes = { + vk::DescriptorPoolSize{ + .type = vk::DescriptorType::eStorageBuffer, + .descriptorCount = buffersCount, + }, + vk::DescriptorPoolSize{ + .type = vk::DescriptorType::eCombinedImageSampler, + .descriptorCount = texturesCount, + }, + vk::DescriptorPoolSize{ + .type = vk::DescriptorType::eStorageImage, + .descriptorCount = storageTexturesCount, + }, + }; + + const vk::DescriptorPoolCreateInfo poolCreateInfo = { + .flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind, + .maxSets = 1, + .poolSizeCount = Cast(poolSizes.size()), + .pPoolSizes = poolSizes.data(), 
+ }; + AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool)); + + vk::DescriptorBindingFlags bindingFlags = + vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind; + eastl::array layoutBindingFlags = { + bindingFlags, + bindingFlags, + bindingFlags, + }; + + vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = { + .bindingCount = Cast(layoutBindingFlags.size()), + .pBindingFlags = layoutBindingFlags.data(), + }; + + eastl::array descriptorLayoutBindings = { + vk::DescriptorSetLayoutBinding{ + .binding = BUFFER_BINDING_INDEX, + .descriptorType = vk::DescriptorType::eStorageBuffer, + .descriptorCount = Cast(buffersCount), + .stageFlags = vk::ShaderStageFlagBits::eAll, + }, + vk::DescriptorSetLayoutBinding{ + .binding = TEXTURE_BINDING_INDEX, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .descriptorCount = Cast(texturesCount), + .stageFlags = vk::ShaderStageFlagBits::eAll, + }, + vk::DescriptorSetLayoutBinding{ + .binding = STORAGE_TEXTURE_BINDING_INDEX, + .descriptorType = vk::DescriptorType::eStorageImage, + .descriptorCount = Cast(storageTexturesCount), + .stageFlags = vk::ShaderStageFlagBits::eAll, + }, + }; + static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size()); + const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = { + .pNext = &bindingFlagsCreateInfo, + .flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool, + .bindingCount = Cast(descriptorLayoutBindings.size()), + .pBindings = descriptorLayoutBindings.data(), + }; + AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout)); + + // One descriptor is enough. Updating it at any time is safe. 
(Update until submit, data held when pending) + // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html + // https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc + const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = { + .descriptorPool = m_DescriptorPool, + .descriptorSetCount = 1, + .pSetLayouts = &m_SetLayout, + }; + AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet)); + + m_Device->SetName(m_SetLayout, "Bindless Layout"); + m_Device->SetName(m_DescriptorPool, "Bindless Pool"); + m_Device->SetName(m_DescriptorSet, "Bindless Set"); + + // NOTE: This needs to be synced with the destructor manually. + // NOTE(review): Commit() must run in release builds too; NDEBUG compiles assert bodies out, so the side effect stays outside the assert. + { [[maybe_unused]] const BufferHandle unifiedGeometryHandle = Commit(m_Geometry.m_BackingBuffer.get()); assert(unifiedGeometryHandle.m_Index == UNIFIED_GEOMETRY_DATA_HANDLE_INDEX); } // Making an assumption to avoid extra bindings. +} + +RenderResourceManager::~RenderResourceManager() +{ + // NOTE: Matches the constructor.
+ if (!m_Device) return; // NOTE(review): a moved-from manager has m_Device taken (see move ctor assert); skip teardown to avoid dereferencing null below. + Release(BufferHandle{0}); + +#if !defined(ASTER_NDEBUG) + WARN_IF(m_CommitedBufferCount > 0 || m_CommitedTextureCount > 0 || m_CommitedStorageTextureCount > 0, + "Resources alive: SSBO = {}, Textures = {}, RWTexture = {}", m_CommitedBufferCount, m_CommitedTextureCount, + m_CommitedStorageTextureCount); +#endif + + m_Geometry.Destroy(m_Device); + m_Index.Destroy(m_Device); + m_Material.Destroy(m_Device); + m_BufferManager.Destroy(m_Device); + m_TextureManager.Destroy(m_Device); + m_StorageTextureManager.Destroy(m_Device); + m_SamplerManager.Destroy(m_Device); + m_Device->m_Device.destroy(m_DescriptorPool, nullptr); + m_Device->m_Device.destroy(m_SetLayout, nullptr); +} + +RenderResourceManager::RenderResourceManager(RenderResourceManager &&other) noexcept + : m_WriteInfos(std::move(other.m_WriteInfos)) + , m_Writes(std::move(other.m_Writes)) + , m_WriteOwner(std::move(other.m_WriteOwner)) + , m_Geometry(std::move(other.m_Geometry)) + , m_Index(std::move(other.m_Index)) + , m_Material(std::move(other.m_Material)) + , m_BufferManager(std::move(other.m_BufferManager)) + , m_TextureManager(std::move(other.m_TextureManager)) + , m_StorageTextureManager(std::move(other.m_StorageTextureManager)) + , m_SamplerManager(std::move(other.m_SamplerManager)) + , m_Device(Take(other.m_Device)) + , m_DescriptorPool(other.m_DescriptorPool) + , m_SetLayout(other.m_SetLayout) + , m_DescriptorSet(other.m_DescriptorSet) + , m_UseBufferAddr(other.m_UseBufferAddr) +#if !defined(ASTER_NDEBUG) + , m_CommitedBufferCount(other.m_CommitedBufferCount) + , m_CommitedTextureCount(other.m_CommitedTextureCount) + , m_CommitedStorageTextureCount(other.m_CommitedStorageTextureCount) +#endif +{ + assert(!other.m_Device); +} + +RenderResourceManager & +RenderResourceManager::operator=(RenderResourceManager &&other) noexcept +{ + if (this == &other) + return *this; + // TODO(review): the assignments below overwrite *this's currently owned resources (descriptor pool, set layout, pooled buffers) without destroying them — leaks when assigning over a live manager; consider destroy-then-move or the swap idiom. + m_WriteInfos = std::move(other.m_WriteInfos); + m_Writes = std::move(other.m_Writes); + m_WriteOwner = std::move(other.m_WriteOwner); + 
m_Geometry = std::move(other.m_Geometry); + m_Index = std::move(other.m_Index); + m_Material = std::move(other.m_Material); + m_BufferManager = std::move(other.m_BufferManager); + m_TextureManager = std::move(other.m_TextureManager); + m_StorageTextureManager = std::move(other.m_StorageTextureManager); + m_SamplerManager = std::move(other.m_SamplerManager); + m_Device = Take(other.m_Device); // Ensure taken. + m_DescriptorPool = other.m_DescriptorPool; + m_SetLayout = other.m_SetLayout; + m_DescriptorSet = other.m_DescriptorSet; + m_UseBufferAddr = other.m_UseBufferAddr; +#if !defined(ASTER_NDEBUG) + m_CommitedBufferCount = other.m_CommitedBufferCount; + m_CommitedTextureCount = other.m_CommitedTextureCount; + m_CommitedStorageTextureCount = other.m_CommitedStorageTextureCount; +#endif + + assert(!other.m_Device); + return *this; +} + +SamplerHandle +RenderResourceManager::CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo) +{ + return m_SamplerManager.Create(m_Device, samplerCreateInfo); +} + +GeometryHandle +RenderResourceManager::CreateGeometryBuffer(usize size, usize alignment, uptr* addr) +{ + GeometryHandle handle = {m_Geometry.Create(size, alignment).m_Index}; + + if (addr) + { + *addr = FetchAddress(handle); + } + + return handle; +} + +uptr +RenderResourceManager::FetchAddress(GeometryHandle handle) +{ + return (m_UseBufferAddr ? 
m_Geometry.m_BufferPtr : 0) + m_Geometry.FetchOffset(handle); +} + +void +RenderResourceManager::Write(GeometryHandle handle, usize offset, usize size, const void *data) +{ + m_Geometry.Write(handle, offset, size, data); +} + +void +RenderResourceManager::Release(GeometryHandle handle) +{ + if (handle.IsInvalid()) + return; + + m_Geometry.Release(handle); +} + +MaterialHandle +RenderResourceManager::CreateMaterialBuffer(usize size, usize alignment, uptr* addr) +{ + MaterialHandle handle = {m_Material.Create(size, alignment).m_Index}; + + if (addr) + { + *addr = FetchAddress(handle); + } + + return handle; +} + +usize +RenderResourceManager::FetchAddress(MaterialHandle handle) +{ + return (m_UseBufferAddr ? m_Material.m_BufferPtr : 0) + m_Material.FetchOffset(handle); +} + +void +RenderResourceManager::Write(MaterialHandle handle, usize offset, usize size, const void *data) +{ + m_Material.Write(handle, offset, size, data); +} + +void +RenderResourceManager::Release(MaterialHandle handle) +{ + if (handle.IsInvalid()) + return; + + m_Material.Release(handle); +} + +IndexHandle +RenderResourceManager::CreateIndexBuffer(usize size, usize alignment, u32 *firstIndex) +{ + IndexHandle handle = {m_Index.Create(size, alignment).m_Index}; + + if (firstIndex) + { + *firstIndex = FetchIndex(handle); + } + + return handle; +} + +u32 +RenderResourceManager::FetchIndex(IndexHandle handle) +{ + return Cast(m_Index.FetchOffset(handle) / sizeof(u32)); +} + +void +RenderResourceManager::Write(IndexHandle handle, usize offset, usize size, const void *data) +{ + m_Index.Write(handle, offset, size, data); +} + +void +RenderResourceManager::Release(IndexHandle handle) +{ + if (handle.IsInvalid()) + return; + + m_Index.Release(handle); +} + +vk::Buffer +RenderResourceManager::GetIndexBuffer() const +{ + return m_Index.m_BackingBuffer->m_Buffer; +} diff --git a/samples/04_scenes/render_resource_manager.h b/samples/04_scenes/render_resource_manager.h new file mode 100644 index 
0000000..4f5b871 --- /dev/null +++ b/samples/04_scenes/render_resource_manager.h @@ -0,0 +1,250 @@ +// ============================================= +// Aster: render_resource_manager.h +// Copyright (c) 2020-2024 Anish Bhobe +// ============================================= + +#pragma once + +#include "global.h" + +#include +#include + +struct Device; +struct Texture; +struct StorageTexture; +struct StorageBuffer; +struct Buffer; + +struct GpuResourceHandle +{ + constexpr static u32 INVALID_HANDLE = MaxValue; + u32 m_Index = INVALID_HANDLE; // Default = invalid + + [[nodiscard]] bool + IsInvalid() const + { + return m_Index == INVALID_HANDLE; + } +}; + +struct BufferHandle : GpuResourceHandle +{ +}; + +struct TextureHandle : GpuResourceHandle +{ +}; + +struct StorageTextureHandle : GpuResourceHandle +{ +}; + +struct SamplerHandle : GpuResourceHandle +{ +}; + +struct VirtualizedBufferHandle : GpuResourceHandle +{ +}; + +struct GeometryHandle : VirtualizedBufferHandle +{ +}; + +struct IndexHandle : VirtualizedBufferHandle +{ +}; + +struct MaterialHandle : VirtualizedBufferHandle +{ +}; + +struct TextureManager +{ + eastl::vector m_Textures; + u32 m_MaxCapacity; + u32 m_FreeHead; + + void Init(u32 maxCapacity); + TextureHandle Commit(Texture *texture); + Texture *Fetch(TextureHandle handle); + void Release(const Device *device, TextureHandle handle); + void Destroy(const Device *device); +}; + +struct BufferManager +{ + eastl::vector m_Buffers; + u32 m_MaxCapacity; + u32 m_FreeHead; + + void Init(u32 maxCapacity); + BufferHandle Commit(StorageBuffer *buffer); + StorageBuffer *Fetch(BufferHandle handle); + void Release(const Device *device, BufferHandle handle); + void Destroy(const Device *device); +}; + +struct StorageTextureManager : TextureManager +{ + StorageTextureHandle Commit(StorageTexture *texture); + StorageTexture *Fetch(StorageTextureHandle handle); + void Release(const Device *device, StorageTextureHandle handle); +}; + +struct SamplerManager +{ + // 
There can only be so many samplers. + eastl::vector m_Samplers; + eastl::vector m_SamplerHashes; + + void Init(usize size); + SamplerHandle Create(const Device *device, const vk::SamplerCreateInfo *createInfo); + vk::Sampler Fetch(SamplerHandle handle); + void Destroy(const Device *device); +}; + +struct VirtualizedBufferPool +{ + // TODO: Use buffer device address + std::unique_ptr m_BackingBuffer; + uptr m_BufferPtr; + VmaVirtualBlock m_Block; + + struct VirtualBuffer + { + VmaVirtualAllocation m_Allocation; + usize m_Offset; + usize m_Size; + }; + + eastl::vector m_VirtualBuffers; + u32 m_FreeHead = GpuResourceHandle::INVALID_HANDLE; + bool m_Dirty = false; + + void InitStorage(const Device *device, usize bufferMaxSize); + void InitIndex(const Device *device, usize bufferMaxSize); + + // Sync the offset buffer if required. + // FUTURE(Bob): Handle the writes for non-ReBAR system. + void UpdateToGpu(const Device *device); + + VirtualizedBufferHandle Create(usize size, usize alignment); + usize FetchOffset(VirtualizedBufferHandle handle); + void Release(VirtualizedBufferHandle handle); + void Write(VirtualizedBufferHandle handle, usize offset, usize size, const void *data); + void Destroy(const Device *device); +}; + +struct RenderResourceManager +{ + private: + union WriteInfo { + vk::DescriptorBufferInfo uBufferInfo; + vk::DescriptorImageInfo uImageInfo; + vk::BufferView uBufferView; + + WriteInfo() + { + } + + explicit WriteInfo(vk::DescriptorBufferInfo info); + explicit WriteInfo(vk::DescriptorImageInfo info); + explicit WriteInfo(vk::BufferView info); + }; + + enum class HandleType + { + eBuffer, + eTexture, + eStorageTexture, + }; + + using WriteOwner = eastl::pair; + + eastl::deque m_WriteInfos; + eastl::vector m_Writes; + eastl::vector m_WriteOwner; + + vk::Sampler m_DefaultSampler; + + VirtualizedBufferPool m_Geometry; + VirtualizedBufferPool m_Index; + VirtualizedBufferPool m_Material; + BufferManager m_BufferManager; + TextureManager m_TextureManager; + 
StorageTextureManager m_StorageTextureManager; + SamplerManager m_SamplerManager; + + void EraseWrites(u32 handleIndex, HandleType handleType); + + public: + Device *m_Device; + + constexpr static u32 BUFFER_BINDING_INDEX = 0; + constexpr static u32 TEXTURE_BINDING_INDEX = 1; + constexpr static u32 STORAGE_TEXTURE_BINDING_INDEX = 2; + + constexpr static u32 UNIFIED_GEOMETRY_DATA_HANDLE_INDEX = 0; + constexpr static u32 UNIFIED_GEOMETRY_OFFSET_HANDLE_INDEX = 1; + constexpr static u32 MATERIAL_HANDLE_INDEX = 2; + + vk::SamplerCreateInfo m_DefaultSamplerCreateInfo; + + vk::DescriptorPool m_DescriptorPool; + vk::DescriptorSetLayout m_SetLayout; + vk::DescriptorSet m_DescriptorSet; + + bool m_UseBufferAddr; + + BufferHandle Commit(StorageBuffer *storageBuffer); // Commit to GPU and take Ownership + void Write(BufferHandle handle, usize offset, usize size, const void *data); // Write to buffer + void Release(BufferHandle handle); // Release and Destroy + void Release(StorageBuffer *storageBuffer, BufferHandle handle); // Release and Return + + TextureHandle CommitTexture(Texture *texture, SamplerHandle sampler = {}); // Commit to GPU and take Ownership + void Release(TextureHandle handle); // Release and Destroy + void Release(Texture *texture, TextureHandle handle); // Release and Return + + StorageTextureHandle CommitStorageTexture(StorageTexture *storageTexture, + SamplerHandle sampler = {}); // Commit to GPU and take Ownership + void Release(StorageTextureHandle handle); // Release and Destroy + void Release(StorageTexture *texture, StorageTextureHandle handle); // Release and Return + + SamplerHandle CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo); + + GeometryHandle CreateGeometryBuffer(usize size, usize alignment, uptr *addr = nullptr); + uptr FetchAddress(GeometryHandle handle); + void Write(GeometryHandle handle, usize offset, usize size, const void *data); + void Release(GeometryHandle handle); + + MaterialHandle CreateMaterialBuffer(usize size, 
usize alignment, uptr *addr = nullptr); + uptr FetchAddress(MaterialHandle handle); + void Write(MaterialHandle handle, usize offset, usize size, const void *data); + void Release(MaterialHandle handle); + + IndexHandle CreateIndexBuffer(usize size, usize alignment, u32 *firstIndex = nullptr); + u32 FetchIndex(IndexHandle handle); + void Write(IndexHandle handle, usize offset, usize size, const void *data); + void Release(IndexHandle handle); + + vk::Buffer GetIndexBuffer() const; + + void Update(); // Update all the descriptors required. + + // Ctor/Dtor + RenderResourceManager(Device *device, u16 maxSize, bool useBufferAddress = true); + ~RenderResourceManager(); + + RenderResourceManager(RenderResourceManager &&other) noexcept; + RenderResourceManager &operator=(RenderResourceManager &&other) noexcept; + +#if !defined(ASTER_NDEBUG) + usize m_CommitedBufferCount = 0; + usize m_CommitedTextureCount = 0; + usize m_CommitedStorageTextureCount = 0; +#endif + + DISALLOW_COPY_AND_ASSIGN(RenderResourceManager); +}; \ No newline at end of file diff --git a/samples/04_scenes/shader/model.frag.glsl b/samples/04_scenes/shader/model.frag.glsl new file mode 100644 index 0000000..3bec0da --- /dev/null +++ b/samples/04_scenes/shader/model.frag.glsl @@ -0,0 +1,18 @@ +#version 450 +#pragma shader_stage(fragment) +#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable +#extension GL_EXT_buffer_reference : require + +layout (location = 2) in vec4 inColor; +layout (location = 0) out vec4 outColor; + +layout(push_constant) uniform Constants { + mat4 globalTransform; + uint64_t vertexPos; + uint64_t vertexDat; + uint64_t materialIdx; +} pcb; + +void main() { + outColor = vec4(inColor.rgb, 1.0f); +} \ No newline at end of file diff --git a/samples/04_scenes/shader/model.vert.glsl b/samples/04_scenes/shader/model.vert.glsl new file mode 100644 index 0000000..50083d8 --- /dev/null +++ b/samples/04_scenes/shader/model.vert.glsl @@ -0,0 +1,56 @@ +#version 450 +#pragma 
shader_stage(vertex) +#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable +#extension GL_EXT_buffer_reference : require + +layout(location=0) out vec4 outWorldNormal; +layout(location=1) out vec4 outWorldPosition; +layout(location=2) out vec4 outColor; +layout(location=3) out vec2 outUV0; + +struct VertexData { + vec4 Normal; + vec2 TexCoord0; + vec2 TexCoord1; + vec4 Color; +}; + +layout(std430, buffer_reference, buffer_reference_align=16) readonly buffer VPositionRef { + vec4 Positions[]; +}; + +layout(std430, buffer_reference, buffer_reference_align=16) readonly buffer VDataRef { + VertexData Data[]; +}; + +layout(set=1, binding=0) uniform Camera { + mat4 View; // 64 + mat4 Projection; // 128 + mat4 InvView; // 192 + mat4 InvProjection; // 256 + vec4 Position; // 272 +} camera; + +layout(push_constant) uniform Constants { + mat4 globalTransform; + VPositionRef vertexPos; + VDataRef vertexDat; + uint64_t materialIdx; +} pcb; + +void main() { + vec3 colors[] = { + vec3( 1.0f, 0.0f, 0.0f ), + vec3( 0.0f, 1.0f, 0.0f ), + vec3( 0.0f, 0.0f, 1.0f ), + }; + + gl_Position = camera.Projection * camera.View * pcb.globalTransform * vec4(pcb.vertexPos.Positions[gl_VertexIndex].xyz, 1.0f); + outColor = vec4(pcb.vertexDat.Data[gl_VertexIndex].Color.rgb, 1.0f); //vec3(colors[gl_VertexIndex % 3]); + + // TODO + // layout(location=0) out vec4 outWorldNormal; + // layout(location=1) out vec4 outWorldPosition; + // layout(location=2) out vec4 outColor; + // layout(location=3) out vec2 outUV0; +} \ No newline at end of file diff --git a/samples/04_scenes/shader/model.vs.hlsl b/samples/04_scenes/shader/model.vs.hlsl new file mode 100644 index 0000000..0ac6b79 --- /dev/null +++ b/samples/04_scenes/shader/model.vs.hlsl @@ -0,0 +1,62 @@ + +struct VS_Input +{ + uint VertexIndex : SV_VertexID; +}; + +struct VS_Out { + float4 WorldNormal : NORMAL; + float4 WorldPosition : POSITION; + float4 Color : COLOR0; + float2 TexCoord0 : TEXCOORD0; +}; + +struct CameraData +{ + float4x4 
View; // 64 + float4x4 Projection; // 128 + float4x4 InvView; // 192 + float4x4 InvProjection; // 256 + float4 Position; // 272 +}; + +[[vk::binding(0, 1)]] ConstantBuffer Camera; + +struct VertexData { + float4 Normal; + float2 TexCoord0; + float2 TexCoord1; + float4 Color; +}; + +[[vk::binding(0, 0)]] ByteAddressBuffer GeometryBuffer[]; + +layout(set=1, binding=0) uniform Camera { + float4x4 View; // 64 + float4x4 Projection; // 128 + float4x4 InvView; // 192 + float4x4 InvProjection; // 256 + float4 Position; // 272 +} Camera; + +layout(push_constant) uniform Constants { + mat4 globalTransform; + uint64 vertexPos; + VDataRef vertexDat; + uint64_t materialIdx; +} pcb; + +void main() { + VS_Output Output; + + float4 GlobalPosition = mul(globalTransform, GeometryBuffer[0].Load()); + float4 ClipSpace = mul(Camera.View, GlobalPosition); + + Output.VertexPosition = mul(Camera.Projection, ClipSpace); + Output.WorldPosition = GlobalPosition; + Output.UV0 = GetUV(StageInput.VertexIndex); + Output.VertexColor = GetColor(StageInput.VertexIndex); + + Output.WorldNormal = mul(GetNormalTransform(PushConstant.NodeIdx), GetNormal(StageInput.VertexIndex)); + return Output; +} \ No newline at end of file diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index a157b3a..e7456b4 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -6,3 +6,4 @@ add_subdirectory("00_util") add_subdirectory("01_triangle") add_subdirectory("02_box") add_subdirectory("03_model_render") +add_subdirectory("04_scenes") diff --git a/vcpkg.json b/vcpkg.json index 75046db..fac4263 100644 --- a/vcpkg.json +++ b/vcpkg.json @@ -7,13 +7,14 @@ { "name": "imgui", "features": [ + "docking-experimental", "glfw-binding", - "vulkan-binding", - "docking-experimental" + "vulkan-binding" ] }, "scottt-debugbreak", "tinygltf", - "vulkan-memory-allocator" + "vulkan-memory-allocator", + "entt" ] }