[WIP] Using Device Addr to simplify objs.

Anish Bhobe 2024-09-07 18:58:45 +02:00
parent c6987a9d5d
commit 978ed648fd
34 changed files with 3695 additions and 11 deletions


@ -33,4 +33,10 @@ function(add_shader TARGET SHADER)
# Make sure our build depends on this output.
set_source_files_properties(${current-output-path} PROPERTIES GENERATED TRUE)
target_sources(${TARGET} PRIVATE ${current-output-path})
endfunction(add_shader)
function(add_shaders TARGET SHADERS)
foreach(shader IN LISTS SHADERS)
add_shader(${TARGET} ${shader})
endforeach()
endfunction(add_shaders)
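# Hypothetical usage (not part of this commit): add_shaders(scene_render "${MODEL_SHADERS}"); the list must be quoted so it reaches SHADERS as a single argument.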


@ -95,17 +95,51 @@ UniformBuffer::Init(const Device *device, const usize size, const cstr name)
void
StorageBuffer::Init(const Device *device, usize size, bool hostVisible, cstr name)
{
Init(device, size, hostVisible, false, name);
}
void
StorageBuffer::Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name)
{
vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer;
if (deviceAddress)
{
usage |= vk::BufferUsageFlagBits::eShaderDeviceAddress;
}
if (hostVisible)
{
Allocate(device, size, usage,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
else
{
usage |= vk::BufferUsageFlagBits::eTransferDst;
Allocate(device, size, usage, 0,
VMA_MEMORY_USAGE_AUTO, name);
}
}
void
StorageIndexBuffer::Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name)
{
vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer;
if (deviceAddress)
{
usage |= vk::BufferUsageFlagBits::eShaderDeviceAddress;
}
if (hostVisible)
{
Allocate(device, size, usage,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
else
{
usage |= vk::BufferUsageFlagBits::eTransferDst;
Allocate(device, size, usage, 0, VMA_MEMORY_USAGE_AUTO, name);
}
}
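For context, the point of the new deviceAddress flag is that the buffer's GPU address can then be queried and handed to shaders directly. A minimal sketch, assuming this commit's Device and StorageBuffer types and mirroring VirtualizedBufferPool::InitStorage further down:
// Sketch only, not part of this commit: create an addressable buffer and query its GPU pointer.
StorageBuffer buffer;
buffer.Init(device, 64 * 1024, /*hostVisible*/ true, /*deviceAddress*/ true, "Example Buffer");
vk::BufferDeviceAddressInfo addressInfo = {
    .buffer = buffer.m_Buffer,
};
// Valid only because eShaderDeviceAddress was added to the usage flags above.
const vk::DeviceAddress gpuPtr = device->m_Device.getBufferAddress(&addressInfo);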


@ -9,6 +9,8 @@
struct Device;
// TODO Refactor the Buffer Hierarchy
struct Buffer
{
vk::Buffer m_Buffer = nullptr;
@ -49,6 +51,12 @@ struct UniformBuffer : Buffer
struct StorageBuffer : Buffer
{
void Init(const Device *device, usize size, bool hostVisible, cstr name = nullptr);
void Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name = nullptr);
};
struct StorageIndexBuffer : StorageBuffer
{
void Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name = nullptr);
};
struct VertexBuffer : Buffer


@ -11,7 +11,7 @@
#include <cstdio>
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
using c8 = char;
using u8 = uint8_t;


@ -81,6 +81,7 @@ Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features
};
const VmaAllocatorCreateInfo allocatorCreateInfo = {
.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT,
.physicalDevice = m_PhysicalDevice,
.device = m_Device,
.pVulkanFunctions = &vmaVulkanFunctions,
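// (The new flag makes VMA add VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT to allocations backing buffers created with eShaderDeviceAddress; it pairs with the bufferDeviceAddress device feature enabled by the samples.)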


@ -2,7 +2,7 @@
cmake_minimum_required(VERSION 3.13)
add_executable(triangle triangle.cpp)
add_shader(triangle shader/triangle.vert.glsl)
add_shader(triangle shader/triangle.frag.glsl)

BIN
samples/03_model_render/model/AlphaBlendModeTest.glb (Stored with Git LFS) Normal file

Binary file not shown.


@ -584,8 +584,6 @@ main(int, char **)
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1,
&resourceManager.m_DescriptorSet, 0, nullptr);
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 1, 1, &perFrameDescriptor, 0,
nullptr);
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 1, 1, &perFrameDescriptor, 0,
nullptr);


@ -0,0 +1,29 @@
# CMakeLists.txt ; CMake project for scene_render
cmake_minimum_required(VERSION 3.13)
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined -fsanitize=address")
find_path(TINYGLTF_INCLUDE_DIRS "tiny_gltf.h")
find_package(EnTT REQUIRED CONFIG)
add_executable(scene_render main.cpp
render_resource_manager.cpp render_resource_manager.h
asset_loader.cpp asset_loader.h
pipeline_utils.cpp pipeline_utils.h
core_components.h
ecs_adapter.h
camera.h)
add_shader(scene_render shader/model.frag.glsl)
add_shader(scene_render shader/model.vert.glsl)
# add_shader(scene_render shader/model.vs.hlsl)
target_link_libraries(scene_render PRIVATE aster_core)
target_link_libraries(scene_render PRIVATE util_helper)
target_link_libraries(scene_render PRIVATE EnTT::EnTT)
target_include_directories(scene_render PRIVATE ${TINYGLTF_INCLUDE_DIRS})
add_resource_dir(scene_render model)
add_resource_dir(scene_render image)

File diff suppressed because it is too large


@ -0,0 +1,111 @@
// =============================================
// Aster: asset_loader.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
#include "buffer.h"
#include "render_resource_manager.h"
#include "ecs_adapter.h"
namespace tinygltf
{
class Model;
struct Image;
} // namespace tinygltf
struct Image;
struct Texture;
constexpr auto GLTF_ASCII_FILE_EXTENSION = ".gltf";
constexpr auto GLTF_BINARY_FILE_EXTENSION = ".glb";
struct Material
{
vec4 m_AlbedoFactor; // 16 16
vec3 m_EmissionFactor; // 12 28
f32 m_MetalFactor; // 04 32
f32 m_RoughFactor; // 04 36
TextureHandle m_AlbedoTex; // 04 40
TextureHandle m_NormalTex; // 04 44
TextureHandle m_MetalRoughTex; // 04 48
TextureHandle m_OcclusionTex; // 04 52
TextureHandle m_EmissionTex; // 04 56
static constexpr usize ALIGNMENT = 4;
};
static_assert(sizeof(Material) == 56);
struct VertexData
{
vec4 m_Normal;
vec2 m_TexCoord0 = vec2{0.0f, 0.0f};
vec2 m_TexCoord1 = vec2{0.0f, 0.0f};
vec4 m_Color0 = vec4{1.0f, 1.0f, 1.0f, 1.0f};
static constexpr usize ALIGNMENT = 16;
};
struct Model
{
eastl::vector<TextureHandle> m_Textures;
eastl::vector<Entity> m_Entities;
IndexHandle m_IndexHandle;
GeometryHandle m_VertexPositionHandle;
GeometryHandle m_VertexDataHandle;
MaterialHandle m_MaterialHandle;
Entity m_RootEntity;
void Destroy(RenderResourceManager *resourceManager, EcsRegistry *registry);
};
struct AssetLoader
{
RenderResourceManager *m_ResourceManager;
entt::registry *m_Registry;
vk::CommandPool m_CommandPool;
vk::CommandBuffer m_CommandBuffer;
vk::Queue m_TransferQueue;
u32 m_TransferQueueIndex;
u32 m_GraphicsQueueIndex;
void LoadHdrImage(Texture *texture, cstr path, cstr name = nullptr) const;
Model LoadModelToGpu(cstr path, cstr name = nullptr);
constexpr static auto ANormal = "NORMAL";
constexpr static auto APosition = "POSITION";
constexpr static auto ATangent = "TANGENT";
constexpr static auto ATexCoord0 = "TEXCOORD_0";
constexpr static auto ATexCoord1 = "TEXCOORD_1";
constexpr static auto AColor0 = "COLOR_0";
constexpr static auto AJoints0 = "JOINTS_0";
constexpr static auto AWeights0 = "WEIGHTS_0";
AssetLoader(RenderResourceManager *resourceManager, EcsRegistry *registry, vk::Queue transferQueue,
u32 transferQueueIndex, u32 graphicsQueueIndex);
~AssetLoader();
AssetLoader(AssetLoader &&other) noexcept;
AssetLoader &operator=(AssetLoader &&other) noexcept;
private:
TextureHandle LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image, bool isSrgb) const;
void
ProcessNode(tinygltf::Model *model, eastl::vector<vec4> *vertexPositions, eastl::vector<VertexData> *vertexData,
eastl::vector<u32> *indices, eastl::vector<Entity> *entities, const std::function<u32(i32)> &loadMaterial, int current, Entity parent);
public:
DISALLOW_COPY_AND_ASSIGN(AssetLoader);
};
void GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayout initialLayout,
vk::ImageLayout finalLayout,
vk::PipelineStageFlags2 prevStage = vk::PipelineStageFlagBits2::eAllCommands,
vk::PipelineStageFlags2 finalStage = vk::PipelineStageFlagBits2::eAllCommands);


@ -0,0 +1,98 @@
// =============================================
// Aster: camera.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "global.h"
struct Camera
{
mat4 m_View;
mat4 m_Perspective;
mat4 m_InverseView;
mat4 m_InversePerspective;
vec3 m_Position;
f32 m_PositionHomogenousPad_ = 1.0f;
void
CalculateInverses()
{
m_InverseView = inverse(m_View);
m_InversePerspective = inverse(m_Perspective);
}
};
struct CameraController
{
constexpr static vec3 UP = vec3(0.0f, 1.0f, 0.0f);
f32 m_Fov;
f32 m_Pitch;
f32 m_Yaw;
f32 m_AspectRatio;
Camera m_Camera;
CameraController(const vec3 &position, const vec3 &target, const f32 vFov, const f32 aspectRatio)
: m_Fov(vFov)
, m_Pitch{0.0f}
, m_Yaw{0.0f}
, m_AspectRatio{aspectRatio}
, m_Camera{
.m_View = lookAt(position, target, UP),
.m_Perspective = glm::perspective(vFov, aspectRatio, 0.1f, 100.0f),
.m_Position = position,
}
{
const vec3 dir = normalize(target - vec3(position));
m_Pitch = asin(dir.y);
m_Yaw = acos(-dir.z / sqrt(1.0f - dir.y * dir.y));
m_Camera.CalculateInverses();
}
void
SetAspectRatio(const f32 aspectRatio)
{
m_AspectRatio = aspectRatio;
m_Camera.m_Perspective = glm::perspective(m_Fov, aspectRatio, 0.1f, 100.0f);
m_Camera.CalculateInverses();
}
void
SetPosition(const vec3 &position)
{
m_Camera.m_Position = vec4(position, 1.0f);
f32 cosPitch = cos(m_Pitch);
const vec3 target = vec3(sin(m_Yaw) * cosPitch, sin(m_Pitch), -cos(m_Yaw) * cosPitch);
m_Camera.m_View = lookAt(position, position + target, UP);
m_Camera.CalculateInverses();
}
void
SetPitchYaw(f32 pitch, f32 yaw)
{
m_Pitch = pitch;
m_Yaw = yaw;
f32 cosPitch = cos(m_Pitch);
const vec3 target = vec3(sin(m_Yaw) * cosPitch, sin(m_Pitch), -cos(m_Yaw) * cosPitch);
const vec3 position = m_Camera.m_Position;
m_Camera.m_View = lookAt(position, position + target, UP);
m_Camera.CalculateInverses();
}
void
SetLookAt(const vec3 &target)
{
const vec3 dir = normalize(target - m_Camera.m_Position);
m_Pitch = asin(dir.y);
m_Yaw = acos(-dir.z / sqrt(1.0f - dir.y * dir.y));
m_Camera.m_View = lookAt(m_Camera.m_Position, target, UP);
m_Camera.CalculateInverses();
}
};


@ -0,0 +1,49 @@
// =============================================
// Aster: core_components.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
template <typename TComponent>
struct CDirty
{
using RelatedComponentType = TComponent;
};
template <typename TComponent>
struct CParent
{
using RelatedComponentType = TComponent;
entt::entity m_ParentEntity = NULL_ENTITY;
};
struct CDynamicTransform
{
vec3 m_Position = vec3{0.0f};
quat m_Rotation = glm::identity<quat>();
vec3 m_Scale = vec3{1.0f};
};
struct CStaticTransform
{};
struct CGlobalTransform
{
mat4 m_Transform = glm::identity<mat4>();
};
struct CMaterial
{
uptr m_MaterialPtr;
};
struct CMesh
{
uptr m_VertexPositionPtr;
uptr m_VertexDataPtr;
u32 m_FirstIndex;
u32 m_IndexCount;
};


@ -0,0 +1,21 @@
// =============================================
// Aster: ecs_adapter.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include <entt/entt.hpp>
using EcsRegistry = entt::registry;
using Entity = entt::entity;
template <typename... T>
using Without = entt::exclude_t<T...>;
[[nodiscard]]
inline bool Exists(Entity entity)
{
return entity != entt::null;
}
constexpr Entity NULL_ENTITY = entt::null;


@ -0,0 +1,2 @@
*.hdr filter=lfs diff=lfs merge=lfs -text
*.exr filter=lfs diff=lfs merge=lfs -text

samples/04_scenes/main.cpp Normal file (529 lines)

@ -0,0 +1,529 @@
// =============================================
// Aster: main.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "context.h"
#include "device.h"
#include "helpers.h"
#include "physical_device.h"
#include "render_resource_manager.h"
#include "swapchain.h"
#include "window.h"
#include "asset_loader.h"
#include "camera.h"
#include "core_components.h"
#include "ecs_adapter.h"
#include "frame.h"
#include "image.h"
#include "pipeline.h"
#include "pipeline_utils.h"
constexpr u32 MAX_FRAMES_IN_FLIGHT = 3;
constexpr auto PIPELINE_CACHE_FILE = "PipelineCacheData.bin";
constexpr auto MODEL_FILE = "model/DamagedHelmet.glb";
constexpr auto MODEL_FILE2 = "model/Box.glb";
constexpr auto BACKDROP_FILE = "image/photo_studio_loft_hall_4k.hdr";
constexpr u32 INIT_WIDTH = 640;
constexpr u32 INIT_HEIGHT = 480;
int
main(int, char *[])
{
MIN_LOG_LEVEL(Logger::LogType::eInfo);
Context context = {"Scene Render [WIP]", VERSION};
Window window = {"Scene Render [WIP] (Aster)", &context, {INIT_WIDTH, INIT_HEIGHT}};
PhysicalDevices physicalDevices = {&window, &context};
PhysicalDevice deviceToUse = FindSuitableDevice(physicalDevices);
usize physicalDeviceOffsetAlignment = deviceToUse.m_DeviceProperties.limits.minUniformBufferOffsetAlignment;
vk::Extent2D internalResolution = {1920, 1080};
internalResolution.width = (internalResolution.height * INIT_WIDTH) / INIT_HEIGHT;
CameraController cameraController = {vec3{0.0f, 0.0f, 2.0f}, vec3{0.0f}, 70_deg,
Cast<f32>(internalResolution.width) / Cast<f32>(internalResolution.height)};
INFO("Using {} as the primary device.", deviceToUse.m_DeviceProperties.deviceName.data());
Features enabledDeviceFeatures = {
.m_Vulkan10Features =
{
.samplerAnisotropy = true,
.shaderInt64 = true,
},
.m_Vulkan12Features =
{
.descriptorIndexing = true,
.shaderSampledImageArrayNonUniformIndexing = true,
.shaderStorageBufferArrayNonUniformIndexing = true,
.shaderStorageImageArrayNonUniformIndexing = true,
.descriptorBindingUniformBufferUpdateAfterBind = true, // Not related to Bindless
.descriptorBindingSampledImageUpdateAfterBind = true,
.descriptorBindingStorageImageUpdateAfterBind = true,
.descriptorBindingStorageBufferUpdateAfterBind = true,
.descriptorBindingPartiallyBound = true,
.runtimeDescriptorArray = true,
.bufferDeviceAddress = true,
.bufferDeviceAddressCaptureReplay = true,
},
.m_Vulkan13Features =
{
.synchronization2 = true,
.dynamicRendering = true,
},
};
auto attachmentFormat = vk::Format::eR8G8B8A8Srgb;
auto pipelineCacheData = ReadFileBytes(PIPELINE_CACHE_FILE, false);
QueueAllocation queueAllocation = FindAppropriateQueueAllocation(&deviceToUse);
Device device = {&context, &deviceToUse, &enabledDeviceFeatures,
{queueAllocation}, pipelineCacheData, "Primary Device"};
vk::Queue graphicsQueue = device.GetQueue(queueAllocation.m_Family, 0);
Swapchain swapchain = {&window, &device, "Primary Chain"};
RenderResourceManager resourceManager = {&device, 1024};
EcsRegistry registry;
AssetLoader assetLoader = {&resourceManager, &registry, graphicsQueue, queueAllocation.m_Family,
queueAllocation.m_Family};
Model model = assetLoader.LoadModelToGpu(MODEL_FILE, "Main Model");
Model model2 = assetLoader.LoadModelToGpu(MODEL_FILE2, "Main Model 2");
registry.get<CDynamicTransform>(model2.m_RootEntity).m_Position.x += 1.0f;
UniformBuffer ubo;
ubo.Init(&device, sizeof cameraController.m_Camera, "Desc1 UBO");
ubo.Write(&device, 0, sizeof cameraController.m_Camera, &cameraController.m_Camera);
Pipeline pipeline = CreatePipeline(&device, attachmentFormat, &resourceManager);
vk::DescriptorPool descriptorPool;
vk::DescriptorSet perFrameDescriptor;
{
vk::DescriptorSetLayout descriptorSetLayout = pipeline.m_SetLayouts[1];
eastl::array poolSizes = {
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 3,
},
};
vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo = {
.maxSets = 1, .poolSizeCount = Cast<u32>(poolSizes.size()), .pPoolSizes = poolSizes.data()};
AbortIfFailed(device.m_Device.createDescriptorPool(&descriptorPoolCreateInfo, nullptr, &descriptorPool));
vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
.descriptorPool = descriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &descriptorSetLayout,
};
AbortIfFailed(device.m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &perFrameDescriptor));
}
vk::DescriptorBufferInfo cameraBufferInfo = {
.buffer = ubo.m_Buffer,
.offset = 0,
.range = sizeof(Camera),
};
eastl::array writeDescriptors = {
vk::WriteDescriptorSet{
.dstSet = perFrameDescriptor,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.pBufferInfo = &cameraBufferInfo,
},
};
device.m_Device.updateDescriptorSets(Cast<u32>(writeDescriptors.size()), writeDescriptors.data(), 0, nullptr);
// Persistent variables
vk::Viewport viewport = {
.x = 0,
.y = Cast<f32>(internalResolution.height),
.width = Cast<f32>(internalResolution.width),
.height = -Cast<f32>(internalResolution.height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = internalResolution,
};
vk::ImageSubresourceRange subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
vk::ImageMemoryBarrier2 preRenderBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe,
.srcAccessMask = vk::AccessFlagBits2::eNone,
.dstStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput,
.dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eColorAttachmentOptimal,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo preRenderDependencies = {
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &preRenderBarrier,
};
vk::ImageMemoryBarrier2 renderToBlitBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput,
.srcAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.oldLayout = vk::ImageLayout::eColorAttachmentOptimal,
.newLayout = vk::ImageLayout::eTransferSrcOptimal,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
vk::ImageMemoryBarrier2 acquireToTransferDstBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe,
.srcAccessMask = vk::AccessFlagBits2::eNone,
.dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
eastl::array postRenderBarriers = {
renderToBlitBarrier,
acquireToTransferDstBarrier,
};
vk::DependencyInfo postRenderDependencies = {
.imageMemoryBarrierCount = Cast<u32>(postRenderBarriers.size()),
.pImageMemoryBarriers = postRenderBarriers.data(),
};
vk::ImageMemoryBarrier2 transferDstToGuiRenderBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput,
.dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eColorAttachmentOptimal,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo preGuiDependencies = {
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &transferDstToGuiRenderBarrier,
};
vk::ImageMemoryBarrier2 prePresentBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput,
.srcAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eBottomOfPipe,
.dstAccessMask = vk::AccessFlagBits2::eNone,
.oldLayout = vk::ImageLayout::eColorAttachmentOptimal,
.newLayout = vk::ImageLayout::ePresentSrcKHR,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo prePresentDependencies = {
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &prePresentBarrier,
};
FrameManager frameManager = {&device, queueAllocation.m_Family, MAX_FRAMES_IN_FLIGHT};
eastl::fixed_vector<DepthImage, MAX_FRAMES_IN_FLIGHT> depthImages(frameManager.m_FramesInFlight);
eastl::fixed_vector<AttachmentImage, MAX_FRAMES_IN_FLIGHT> attachmentImages(frameManager.m_FramesInFlight);
{
auto depthIter = depthImages.begin();
auto attachmentIter = attachmentImages.begin();
for (u32 index = 0; index < frameManager.m_FramesInFlight; ++index)
{
auto name = fmt::format("Depth Frame{}", index);
depthIter->Init(&device, internalResolution, name.c_str());
name = fmt::format("Attachment0 Frame{}", index);
attachmentIter->Init(&device, internalResolution, attachmentFormat, name.c_str());
++depthIter;
++attachmentIter;
}
}
struct NodeData
{
mat4 m_Transform;
uptr m_VertexPositionPtr;
uptr m_VertexDataPtr;
uptr m_MaterialPtr;
// TODO: Remove
u32 m_FirstIndex;
u32 m_IndexCount;
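// Total size: 64-byte mat4 + three 8-byte device addresses + two u32 fields = 96 bytes (assuming 64-bit uptr), matching the push-constant range declared in pipeline_utils.cpp.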
};
eastl::fixed_vector<eastl::vector<NodeData>, MAX_FRAMES_IN_FLIGHT> perFrameNodeData(frameManager.m_FramesInFlight);
eastl::fixed_vector<BufferHandle, MAX_FRAMES_IN_FLIGHT> perFrameNodeBuffer(frameManager.m_FramesInFlight);
for (auto &bufferHandle : perFrameNodeBuffer)
{
StorageBuffer buffer;
buffer.Init(&device, sizeof(NodeData) * 100'000, true);
bufferHandle = resourceManager.Commit(&buffer);
}
swapchain.RegisterResizeCallback(
[&cameraController, &internalResolution, &viewport, &scissor](vk::Extent2D extent) {
cameraController.SetAspectRatio(Cast<f32>(extent.width) / Cast<f32>(extent.height));
internalResolution.width = Cast<u32>(Cast<f32>(internalResolution.height) * cameraController.m_AspectRatio);
viewport.y = Cast<f32>(internalResolution.height);
viewport.width = Cast<f32>(internalResolution.width);
viewport.height = -Cast<f32>(internalResolution.height);
scissor.extent = internalResolution;
});
auto sortByParentHier = [&registry](Entity a, Entity b) {
const auto parent = registry.try_get<CParent<CDynamicTransform>>(b);
return parent && parent->m_ParentEntity == a;
};
registry.sort<CParent<CDynamicTransform>>(sortByParentHier);
Time::Init();
auto rootNodeUpdateView = registry.view<CDynamicTransform, CGlobalTransform>(Without<CParent<CDynamicTransform>>{});
auto nodeWithParentsUpdateView = registry.view<CDynamicTransform, CParent<CDynamicTransform>, CGlobalTransform>();
nodeWithParentsUpdateView.use<CParent<CDynamicTransform>>();
auto renderableObjectsGroup = registry.group<CGlobalTransform, CMesh, CMaterial>();
resourceManager.Update();
while (window.Poll())
{
Time::Update();
auto *rot = &registry.get<CDynamicTransform>(model.m_RootEntity).m_Rotation;
*rot = glm::rotate(*rot, Cast<f32>(30_deg * Time::m_Delta), vec3{0.0f, 1.0f, 0.0f});
Frame *currentFrame = frameManager.GetNextFrame(&swapchain, &window);
u32 imageIndex = currentFrame->m_ImageIdx;
vk::Image currentSwapchainImage = swapchain.m_Images[imageIndex];
vk::ImageView currentSwapchainImageView = swapchain.m_ImageViews[imageIndex];
vk::CommandBuffer cmd = currentFrame->m_CommandBuffer;
DepthImage *currentDepthImage = &depthImages[currentFrame->m_FrameIdx];
AttachmentImage *currentAttachment = &attachmentImages[currentFrame->m_FrameIdx];
if (currentAttachment->m_Extent.width != internalResolution.width ||
currentAttachment->m_Extent.height != internalResolution.height)
{
auto name = fmt::format("Depth Frame{}", currentFrame->m_FrameIdx);
currentDepthImage->Destroy(&device);
currentDepthImage->Init(&device, internalResolution, name.c_str());
name = fmt::format("Attachment0 Frame{}", currentFrame->m_FrameIdx);
currentAttachment->Destroy(&device);
currentAttachment->Init(&device, internalResolution, attachmentFormat, name.c_str());
}
vk::ImageView currentDepthImageView = currentDepthImage->m_View;
vk::Image currentImage = currentAttachment->m_Image;
vk::ImageView currentImageView = currentAttachment->m_View;
preRenderBarrier.image = currentImage;
postRenderBarriers[0].image = currentImage;
postRenderBarriers[1].image = currentSwapchainImage;
transferDstToGuiRenderBarrier.image = currentSwapchainImage;
prePresentBarrier.image = currentSwapchainImage;
ubo.Write(&device, 0, sizeof cameraController.m_Camera, &cameraController.m_Camera);
for (auto [entity, dynTransform, globalTransform] : rootNodeUpdateView.each())
{
auto scale = glm::scale(mat4{1.0f}, dynTransform.m_Scale);
auto rotation = glm::toMat4(dynTransform.m_Rotation);
auto translation = glm::translate(mat4{1.0f}, dynTransform.m_Position);
globalTransform.m_Transform = translation * rotation * scale;
}
// Has been sorted and ordered by parent.
for (auto [entity, dynTransform, parent, globalTransform] : nodeWithParentsUpdateView.each())
{
auto scale = glm::scale(mat4{1.0f}, dynTransform.m_Scale);
auto rotation = glm::toMat4(dynTransform.m_Rotation);
auto translation = glm::translate(mat4{1.0f}, dynTransform.m_Position);
globalTransform.m_Transform =
registry.get<CGlobalTransform>(parent.m_ParentEntity).m_Transform * translation * rotation * scale;
}
usize objectCount = renderableObjectsGroup.size();
auto *nodeData = &perFrameNodeData[currentFrame->m_FrameIdx];
nodeData->clear();
nodeData->reserve(objectCount);
for (auto [entity, globalTransform, mesh, material] : renderableObjectsGroup.each())
{
nodeData->push_back({
.m_Transform = globalTransform.m_Transform,
.m_VertexPositionPtr = mesh.m_VertexPositionPtr,
.m_VertexDataPtr = mesh.m_VertexDataPtr,
.m_MaterialPtr = material.m_MaterialPtr,
.m_FirstIndex = mesh.m_FirstIndex,
.m_IndexCount = mesh.m_IndexCount,
});
}
resourceManager.Write(perFrameNodeBuffer[currentFrame->m_FrameIdx], 0, objectCount * sizeof(NodeData),
nodeData->data());
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
AbortIfFailed(cmd.begin(&beginInfo));
cmd.pipelineBarrier2(&preRenderDependencies);
// Render
eastl::array attachmentInfos = {
vk::RenderingAttachmentInfo{
.imageView = currentImageView,
.imageLayout = vk::ImageLayout::eColorAttachmentOptimal,
.resolveMode = vk::ResolveModeFlagBits::eNone,
.loadOp = vk::AttachmentLoadOp::eClear,
.storeOp = vk::AttachmentStoreOp::eStore,
.clearValue = vk::ClearColorValue{0.0f, 0.0f, 0.0f, 1.0f},
},
};
vk::RenderingAttachmentInfo depthAttachment = {
.imageView = currentDepthImageView,
.imageLayout = vk::ImageLayout::eDepthAttachmentOptimal,
.resolveMode = vk::ResolveModeFlagBits::eNone,
.loadOp = vk::AttachmentLoadOp::eClear,
.storeOp = vk::AttachmentStoreOp::eDontCare,
.clearValue = vk::ClearDepthStencilValue{.depth = 1.0f, .stencil = 0},
};
vk::RenderingInfo renderingInfo = {
.renderArea = {.extent = ToExtent2D(currentAttachment->m_Extent)},
.layerCount = 1,
.colorAttachmentCount = Cast<u32>(attachmentInfos.size()),
.pColorAttachments = attachmentInfos.data(),
.pDepthAttachment = &depthAttachment,
};
cmd.beginRendering(&renderingInfo);
cmd.setViewport(0, 1, &viewport);
cmd.setScissor(0, 1, &scissor);
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1,
&resourceManager.m_DescriptorSet, 0, nullptr);
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 1, 1, &perFrameDescriptor, 0,
nullptr);
//TODO("Unify index buffers");
cmd.bindIndexBuffer(resourceManager.GetIndexBuffer(), 0, vk::IndexType::eUint32);
cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
// TODO("Get the data to the GPU");
// auto nodeHandle = perFrameNodeBuffer[currentFrame->m_FrameIdx];
auto &nodeBuffer = perFrameNodeData[currentFrame->m_FrameIdx];
for (auto &node : nodeBuffer)
{
cmd.pushConstants(pipeline.m_Layout, vk::ShaderStageFlagBits::eAll, 0, sizeof node, &node);
cmd.drawIndexed(node.m_IndexCount, 1, node.m_FirstIndex, 0, 0);
}
cmd.endRendering();
cmd.pipelineBarrier2(&postRenderDependencies);
vk::ImageBlit blitRegion = {
.srcSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.srcOffsets =
std::array{
vk::Offset3D{0, 0, 0},
ToOffset3D(currentAttachment->m_Extent),
},
.dstSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.dstOffsets =
std::array{
vk::Offset3D{0, 0, 0},
vk::Offset3D{Cast<i32>(swapchain.m_Extent.width), Cast<i32>(swapchain.m_Extent.height), 1},
},
};
cmd.blitImage(currentImage, postRenderBarriers[0].newLayout, currentSwapchainImage,
postRenderBarriers[1].newLayout, 1, &blitRegion, vk::Filter::eLinear);
cmd.pipelineBarrier2(&preGuiDependencies);
cmd.pipelineBarrier2(&prePresentDependencies);
AbortIfFailed(cmd.end());
vk::PipelineStageFlags waitDstStage = vk::PipelineStageFlagBits::eColorAttachmentOutput;
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame->m_ImageAcquireSem,
.pWaitDstStageMask = &waitDstStage,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
.signalSemaphoreCount = 1,
.pSignalSemaphores = &currentFrame->m_RenderFinishSem,
};
AbortIfFailed(graphicsQueue.submit(1, &submitInfo, currentFrame->m_FrameAvailableFence));
currentFrame->Present(graphicsQueue, &swapchain, &window);
}
device.WaitIdle();
for (auto bufferHandle : perFrameNodeBuffer)
{
resourceManager.Release(bufferHandle);
}
for (auto &depthImage : depthImages)
{
depthImage.Destroy(&device);
}
for (auto &attachmentImage : attachmentImages)
{
attachmentImage.Destroy(&device);
}
ubo.Destroy(&device);
device.m_Device.destroy(descriptorPool, nullptr);
model.Destroy(&resourceManager, &registry);
model2.Destroy(&resourceManager, &registry);
}

BIN
samples/04_scenes/model/AlphaBlendModeTest.glb (Stored with Git LFS) Normal file

Binary file not shown.

BIN
samples/04_scenes/model/Box.glb (Stored with Git LFS) Normal file

Binary file not shown.

BIN
samples/04_scenes/model/DamagedHelmet.glb (Stored with Git LFS) Normal file

Binary file not shown.

BIN
samples/04_scenes/model/MarbleBust/marble_bust_01.bin (Stored with Git LFS) Normal file

Binary file not shown.


@ -0,0 +1,161 @@
{
"asset": {
"generator": "Khronos glTF Blender I/O v1.6.16",
"version": "2.0"
},
"scene": 0,
"scenes": [
{
"name": "Scene",
"nodes": [
0
]
}
],
"nodes": [
{
"mesh": 0,
"name": "marble_bust_01",
"translation": [
0,
0.028335653245449066,
0
]
}
],
"materials": [
{
"doubleSided": true,
"name": "marble_bust_01",
"normalTexture": {
"index": 0
},
"pbrMetallicRoughness": {
"baseColorTexture": {
"index": 1
},
"metallicFactor": 0,
"metallicRoughnessTexture": {
"index": 2
}
}
}
],
"meshes": [
{
"name": "marble_bust_01",
"primitives": [
{
"attributes": {
"POSITION": 0,
"NORMAL": 1,
"TEXCOORD_0": 2
},
"indices": 3,
"material": 0
}
]
}
],
"textures": [
{
"sampler": 0,
"source": 0
},
{
"sampler": 0,
"source": 1
},
{
"sampler": 0,
"source": 2
}
],
"images": [
{
"mimeType": "image/jpeg",
"name": "marble_bust_01_nor_gl",
"uri": "textures/marble_bust_01_nor_gl_4k.jpg"
},
{
"mimeType": "image/jpeg",
"name": "marble_bust_01_diff",
"uri": "textures/marble_bust_01_diff_4k.jpg"
},
{
"mimeType": "image/jpeg",
"name": "marble_bust_01_arm",
"uri": "textures/marble_bust_01_rough_4k.jpg"
}
],
"accessors": [
{
"bufferView": 0,
"componentType": 5126,
"count": 9746,
"max": [
0.14886942505836487,
0.48668384552001953,
0.1551172435283661
],
"min": [
-0.12288019061088562,
-0.028259359300136566,
-0.1445964276790619
],
"type": "VEC3"
},
{
"bufferView": 1,
"componentType": 5126,
"count": 9746,
"type": "VEC3"
},
{
"bufferView": 2,
"componentType": 5126,
"count": 9746,
"type": "VEC2"
},
{
"bufferView": 3,
"componentType": 5123,
"count": 52368,
"type": "SCALAR"
}
],
"bufferViews": [
{
"buffer": 0,
"byteLength": 116952,
"byteOffset": 0
},
{
"buffer": 0,
"byteLength": 116952,
"byteOffset": 116952
},
{
"buffer": 0,
"byteLength": 77968,
"byteOffset": 233904
},
{
"buffer": 0,
"byteLength": 104736,
"byteOffset": 311872
}
],
"samplers": [
{
"magFilter": 9729,
"minFilter": 9987
}
],
"buffers": [
{
"byteLength": 416608,
"uri": "marble_bust_01.bin"
}
]
}

Binary files not shown (3 files).

BIN
samples/04_scenes/model/OrientationTest.glb (Stored with Git LFS) Normal file

Binary file not shown.


@ -0,0 +1,189 @@
// =============================================
// Aster: pipeline_utils.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "pipeline_utils.h"
#include "device.h"
#include "render_resource_manager.h"
#include "helpers.h"
#include <EASTL/array.h>
Pipeline
CreatePipeline(const Device *device, vk::Format attachmentFormat, const RenderResourceManager *resourceManager)
{
// Pipeline Setup
auto vertexShaderModule = CreateShader(device, VERTEX_SHADER_FILE);
auto fragmentShaderModule = CreateShader(device, FRAGMENT_SHADER_FILE);
eastl::array<vk::PipelineShaderStageCreateInfo, 2> shaderStages = {{
{
.stage = vk::ShaderStageFlagBits::eVertex,
.module = vertexShaderModule,
.pName = "main",
},
{
.stage = vk::ShaderStageFlagBits::eFragment,
.module = fragmentShaderModule,
.pName = "main",
},
}};
eastl::vector<vk::DescriptorSetLayout> descriptorSetLayouts;
descriptorSetLayouts.push_back(resourceManager->m_SetLayout);
{
eastl::array descriptorSetLayoutBindings = {
vk::DescriptorSetLayoutBinding{
.binding = 0,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = 1,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = 2,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
};
vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
.bindingCount = Cast<u32>(descriptorSetLayoutBindings.size()),
.pBindings = descriptorSetLayoutBindings.data(),
};
vk::DescriptorSetLayout descriptorSetLayout;
AbortIfFailed(
device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout));
descriptorSetLayouts.push_back(descriptorSetLayout);
}
vk::PushConstantRange pushConstantRange = {
.stageFlags = vk::ShaderStageFlagBits::eAll,
.offset = 0,
.size = 96,
};
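// 96 bytes corresponds to the per-draw NodeData block (transform, buffer device addresses, index range) pushed in main.cpp.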
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
.setLayoutCount = Cast<u32>(descriptorSetLayouts.size()),
.pSetLayouts = descriptorSetLayouts.data(),
.pushConstantRangeCount = 1,
.pPushConstantRanges = &pushConstantRange,
};
vk::PipelineLayout pipelineLayout;
AbortIfFailed(device->m_Device.createPipelineLayout(&pipelineLayoutCreateInfo, nullptr, &pipelineLayout));
device->SetName(pipelineLayout, "Box Layout");
descriptorSetLayouts[0] = nullptr; // Not owned.
vk::PipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {};
vk::PipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
.topology = vk::PrimitiveTopology::eTriangleList,
.primitiveRestartEnable = false,
};
vk::PipelineViewportStateCreateInfo viewportStateCreateInfo = {
.viewportCount = 1,
.scissorCount = 1,
};
vk::PipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
.depthClampEnable = false,
.rasterizerDiscardEnable = false,
.polygonMode = vk::PolygonMode::eFill,
.cullMode = vk::CullModeFlagBits::eBack,
.frontFace = vk::FrontFace::eCounterClockwise,
.depthBiasEnable = false,
.lineWidth = 1.0,
};
vk::PipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
.rasterizationSamples = vk::SampleCountFlagBits::e1,
.sampleShadingEnable = false,
};
vk::PipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo = {
.depthTestEnable = true,
.depthWriteEnable = true,
.depthCompareOp = vk::CompareOp::eLess,
};
vk::PipelineColorBlendAttachmentState colorBlendAttachmentState = {
.blendEnable = false,
.srcColorBlendFactor = vk::BlendFactor::eSrcColor,
.dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcColor,
.colorBlendOp = vk::BlendOp::eAdd,
.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha,
.dstAlphaBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha,
.alphaBlendOp = vk::BlendOp::eAdd,
.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA,
};
vk::PipelineColorBlendStateCreateInfo colorBlendStateCreateInfo = {
.logicOpEnable = false,
.attachmentCount = 1,
.pAttachments = &colorBlendAttachmentState,
};
eastl::array dynamicStates = {
vk::DynamicState::eScissor,
vk::DynamicState::eViewport,
};
vk::PipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
.dynamicStateCount = Cast<u32>(dynamicStates.size()),
.pDynamicStates = dynamicStates.data(),
};
vk::PipelineRenderingCreateInfo renderingCreateInfo = {
.viewMask = 0,
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &attachmentFormat,
.depthAttachmentFormat = vk::Format::eD24UnormS8Uint,
};
vk::GraphicsPipelineCreateInfo pipelineCreateInfo = {
.pNext = &renderingCreateInfo,
.stageCount = Cast<u32>(shaderStages.size()),
.pStages = shaderStages.data(),
.pVertexInputState = &vertexInputStateCreateInfo,
.pInputAssemblyState = &inputAssemblyStateCreateInfo,
.pViewportState = &viewportStateCreateInfo,
.pRasterizationState = &rasterizationStateCreateInfo,
.pMultisampleState = &multisampleStateCreateInfo,
.pDepthStencilState = &depthStencilStateCreateInfo,
.pColorBlendState = &colorBlendStateCreateInfo,
.pDynamicState = &dynamicStateCreateInfo,
.layout = pipelineLayout,
};
vk::Pipeline pipeline;
AbortIfFailed(
device->m_Device.createGraphicsPipelines(device->m_PipelineCache, 1, &pipelineCreateInfo, nullptr, &pipeline));
device->SetName(pipeline, "Box Pipeline");
device->m_Device.destroy(vertexShaderModule, nullptr);
device->m_Device.destroy(fragmentShaderModule, nullptr);
return {device, pipelineLayout, pipeline, std::move(descriptorSetLayouts)};
}
vk::ShaderModule
CreateShader(const Device *device, cstr shaderFile)
{
eastl::vector<u32> shaderCode = ReadFile(shaderFile);
const vk::ShaderModuleCreateInfo shaderModuleCreateInfo = {
.codeSize = shaderCode.size() * sizeof(u32),
.pCode = shaderCode.data(),
};
vk::ShaderModule shaderModule;
AbortIfFailedMV(device->m_Device.createShaderModule(&shaderModuleCreateInfo, nullptr, &shaderModule),
"Shader {} could not be created.", shaderFile);
return shaderModule;
}


@ -0,0 +1,20 @@
// =============================================
// Aster: pipeline_utils.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
#include "pipeline.h"
struct RenderResourceManager;
struct Swapchain;
struct Device;
constexpr auto VERTEX_SHADER_FILE = "shader/model.vert.glsl.spv";
constexpr auto FRAGMENT_SHADER_FILE = "shader/model.frag.glsl.spv";
vk::ShaderModule CreateShader(const Device *device, cstr shaderFile);
Pipeline
CreatePipeline(const Device *device, vk::Format attachmentFormat, const RenderResourceManager *resourceManager);


@ -0,0 +1,943 @@
// =============================================
// Aster: render_resource_manager.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "render_resource_manager.h"
#include "buffer.h"
#include "device.h"
#include "helpers.h"
#include "image.h"
#include <EASTL/array.h>
void
TextureManager::Init(const u32 maxCapacity)
{
m_MaxCapacity = maxCapacity;
m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}
TextureHandle
TextureManager::Commit(Texture *texture)
{
ERROR_IF(!texture || !texture->IsValid(), "Texture must be valid for committal")
THEN_ABORT(-1);
if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
{
const u32 index = m_FreeHead;
Texture *allocatedTexture = &m_Textures[index];
assert(!allocatedTexture->IsValid());
m_FreeHead = *Recast<u32 *>(allocatedTexture);
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<Texture>);
*allocatedTexture = *texture;
// Take ownership of the texture.
texture->m_Flags_ &= ~Texture::OWNED_BIT;
return {index};
}
const u32 index = Cast<u32>(m_Textures.size());
if (index < m_MaxCapacity)
{
Texture *allocatedTexture = &m_Textures.push_back();
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<Texture>);
*allocatedTexture = *texture;
texture->m_Flags_ &= ~Texture::OWNED_BIT;
return {index};
}
ERROR("Out of Buffers") THEN_ABORT(-1);
}
Texture *
TextureManager::Fetch(const TextureHandle handle)
{
assert(!handle.IsInvalid());
return &m_Textures[handle.m_Index];
}
void
TextureManager::Release(const Device *device, const TextureHandle handle)
{
assert(!handle.IsInvalid());
Texture *allocatedTexture = &m_Textures[handle.m_Index];
allocatedTexture->Destroy(device);
assert(!allocatedTexture->IsValid());
*Recast<u32 *>(allocatedTexture) = m_FreeHead;
m_FreeHead = handle.m_Index;
}
void
TextureManager::Destroy(const Device *device)
{
for (auto &texture : m_Textures)
{
texture.Destroy(device);
}
}
void
BufferManager::Init(const u32 maxCapacity)
{
m_MaxCapacity = maxCapacity;
m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}
BufferHandle
BufferManager::Commit(StorageBuffer *buffer)
{
ERROR_IF(!buffer || !buffer->IsValid() || !buffer->IsOwned(), "Buffer must be valid and owned for committal")
THEN_ABORT(-1);
if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
{
const u32 index = m_FreeHead;
StorageBuffer *allocatedBuffer = &m_Buffers[index];
assert(!allocatedBuffer->IsValid());
m_FreeHead = *Recast<u32 *>(allocatedBuffer);
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<StorageBuffer>);
*allocatedBuffer = *buffer;
// Take ownership of the buffer.
buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;
return {index};
}
const u32 index = Cast<u32>(m_Buffers.size());
if (index < m_MaxCapacity)
{
StorageBuffer *allocatedBuffer = &m_Buffers.push_back();
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<StorageBuffer>);
*allocatedBuffer = *buffer;
buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;
return {index};
}
ERROR("Out of Buffers") THEN_ABORT(-1);
}
StorageBuffer *
BufferManager::Fetch(const BufferHandle handle)
{
assert(!handle.IsInvalid());
return &m_Buffers[handle.m_Index];
}
void
BufferManager::Release(const Device *device, const BufferHandle handle)
{
assert(!handle.IsInvalid());
StorageBuffer *allocatedBuffer = &m_Buffers[handle.m_Index];
allocatedBuffer->Destroy(device);
assert(!allocatedBuffer->IsValid());
*Recast<u32 *>(allocatedBuffer) = m_FreeHead;
m_FreeHead = handle.m_Index;
}
void
BufferManager::Destroy(const Device *device)
{
for (auto &buffer : m_Buffers)
{
buffer.Destroy(device);
}
}
StorageTextureHandle
StorageTextureManager::Commit(StorageTexture *texture)
{
const TextureHandle tx = TextureManager::Commit(texture);
return {tx.m_Index};
}
StorageTexture *
StorageTextureManager::Fetch(const StorageTextureHandle handle)
{
assert(!handle.IsInvalid());
return Recast<StorageTexture *>(&m_Textures[handle.m_Index]);
}
void
StorageTextureManager::Release(const Device *device, const StorageTextureHandle handle)
{
TextureManager::Release(device, {handle.m_Index});
}
usize
HashSamplerCreateInfo(const vk::SamplerCreateInfo *createInfo)
{
usize hash = HashAny(createInfo->flags);
hash = HashCombine(hash, HashAny(createInfo->magFilter));
hash = HashCombine(hash, HashAny(createInfo->minFilter));
hash = HashCombine(hash, HashAny(createInfo->mipmapMode));
hash = HashCombine(hash, HashAny(createInfo->addressModeU));
hash = HashCombine(hash, HashAny(createInfo->addressModeV));
hash = HashCombine(hash, HashAny(createInfo->addressModeW));
hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->mipLodBias * 1000))); // Resolution of 10^-3
hash = HashCombine(hash, HashAny(createInfo->anisotropyEnable));
hash = HashCombine(hash,
HashAny(Cast<usize>(createInfo->maxAnisotropy * 0x10))); // 16:1 Anisotropy is enough resolution
hash = HashCombine(hash, HashAny(createInfo->compareEnable));
hash = HashCombine(hash, HashAny(createInfo->compareOp));
hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->minLod * 1000))); // 0.001 resolution is enough.
hash = HashCombine(hash,
HashAny(Cast<usize>(createInfo->maxLod * 1000))); // 0.001 resolution is enough. (1 == NO Clamp)
hash = HashCombine(hash, HashAny(createInfo->borderColor));
hash = HashCombine(hash, HashAny(createInfo->unnormalizedCoordinates));
return hash;
}
void
SamplerManager::Init(usize size)
{
m_Samplers.reserve(size);
m_SamplerHashes.reserve(size);
}
SamplerHandle
SamplerManager::Create(const Device *device, const vk::SamplerCreateInfo *createInfo)
{
const usize hash = HashSamplerCreateInfo(createInfo);
for (u32 index = 0; usize samplerHash : m_SamplerHashes)
{
if (samplerHash == hash)
{
return {index};
}
++index;
}
vk::Sampler sampler;
AbortIfFailed(device->m_Device.createSampler(createInfo, nullptr, &sampler));
const u32 index = Cast<u32>(m_SamplerHashes.size());
m_SamplerHashes.push_back(hash);
m_Samplers.push_back(sampler);
return {index};
}
vk::Sampler
SamplerManager::Fetch(const SamplerHandle handle)
{
assert(!handle.IsInvalid());
return m_Samplers[handle.m_Index];
}
void
SamplerManager::Destroy(const Device *device)
{
for (const auto &sampler : m_Samplers)
{
device->m_Device.destroy(sampler, nullptr);
}
m_Samplers.clear();
m_SamplerHashes.clear();
}
void
VirtualizedBufferPool::InitStorage(const Device *device, usize bufferMaxSize)
{
auto buffer = std::make_unique<StorageBuffer>();
buffer->Init(device, bufferMaxSize, true, true, "Unified Geometry Buffer");
m_BackingBuffer = std::move(buffer);
vk::BufferDeviceAddressInfo addressInfo = {
.buffer = m_BackingBuffer->m_Buffer,
};
m_BufferPtr = device->m_Device.getBufferAddress(&addressInfo);
const VmaVirtualBlockCreateInfo virtualBlockCreateInfo = {
.size = bufferMaxSize,
};
AbortIfFailed(Cast<vk::Result>(vmaCreateVirtualBlock(&virtualBlockCreateInfo, &m_Block)));
}
void
VirtualizedBufferPool::InitIndex(const Device *device, usize bufferMaxSize)
{
auto buffer = std::make_unique<StorageIndexBuffer>();
buffer->Init(device, bufferMaxSize, true, true, "Unified Index Buffer");
m_BackingBuffer = std::move(buffer);
vk::BufferDeviceAddressInfo addressInfo = {
.buffer = m_BackingBuffer->m_Buffer,
};
m_BufferPtr = device->m_Device.getBufferAddress(&addressInfo);
const VmaVirtualBlockCreateInfo virtualBlockCreateInfo = {
.size = bufferMaxSize,
};
AbortIfFailed(Cast<vk::Result>(vmaCreateVirtualBlock(&virtualBlockCreateInfo, &m_Block)));
}
void
VirtualizedBufferPool::UpdateToGpu(const Device *device)
{
// Not required until non-ReBAR support is added.
}
VirtualizedBufferHandle
VirtualizedBufferPool::Create(usize size, usize alignment)
{
const VmaVirtualAllocationCreateInfo virtualAllocationCreateInfo = {
.size = size,
.alignment = alignment,
};
VmaVirtualAllocation allocation;
usize offset;
AbortIfFailed(vmaVirtualAllocate(m_Block, &virtualAllocationCreateInfo, &allocation, &offset));
const VirtualBuffer virtualBuffer = {
.m_Allocation = allocation,
.m_Offset = offset,
.m_Size = size,
};
u32 index;
VirtualBuffer *allocVBuf;
if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
{
index = m_FreeHead;
allocVBuf = &m_VirtualBuffers[index];
m_FreeHead = *Recast<u32 *>(allocVBuf);
}
else
{
index = Cast<u32>(m_VirtualBuffers.size());
allocVBuf = &m_VirtualBuffers.push_back();
}
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<VirtualBuffer>);
*allocVBuf = virtualBuffer;
m_Dirty = true;
return {index};
}
uptr
VirtualizedBufferPool::FetchOffset(VirtualizedBufferHandle handle)
{
assert(!handle.IsInvalid());
return m_VirtualBuffers[handle.m_Index].m_Offset;
}
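The asset_loader.cpp diff is suppressed above, but the per-mesh pointers stored in CMesh are presumably the backing buffer's device address plus a sub-allocation offset; a hypothetical helper (not declared anywhere in this commit) would look like:
// Hypothetical helper, not part of this commit: combine the pool's base device address with a sub-allocation offset.
uptr
VirtualizedBufferPool::FetchDeviceAddress(const VirtualizedBufferHandle handle)
{
    return m_BufferPtr + FetchOffset(handle);
}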
void
VirtualizedBufferPool::Release(VirtualizedBufferHandle handle)
{
assert(!handle.IsInvalid());
VirtualBuffer *virtualBuffer = &m_VirtualBuffers[handle.m_Index];
vmaVirtualFree(m_Block, virtualBuffer->m_Allocation);
*Recast<u32 *>(virtualBuffer) = m_FreeHead;
m_FreeHead = handle.m_Index;
}
void
VirtualizedBufferPool::Write(VirtualizedBufferHandle handle, usize offset, usize size, const void *data)
{
if (handle.IsInvalid())
return;
assert(m_BackingBuffer->IsMapped() && "Non ReBAR not supported.");
const VirtualBuffer *virtualBuffer = &m_VirtualBuffers[handle.m_Index];
assert(offset + size <= virtualBuffer->m_Size);
u8 *target = m_BackingBuffer->m_Mapped + virtualBuffer->m_Offset + offset;
memcpy(target, data, size);
}
void
VirtualizedBufferPool::Destroy(const Device *device)
{
m_BackingBuffer->Destroy(device);
m_BackingBuffer.reset();
}
RenderResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info)
: uBufferInfo(info)
{
}
RenderResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info)
: uImageInfo(info)
{
}
RenderResourceManager::WriteInfo::WriteInfo(vk::BufferView info)
: uBufferView(info)
{
}
BufferHandle
RenderResourceManager::Commit(StorageBuffer *storageBuffer)
{
const BufferHandle handle = m_BufferManager.Commit(storageBuffer);
m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
.buffer = storageBuffer->m_Buffer,
.offset = 0,
.range = storageBuffer->GetSize(),
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = BUFFER_BINDING_INDEX,
.dstArrayElement = handle.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.pBufferInfo = &m_WriteInfos.back().uBufferInfo,
});
m_WriteOwner.emplace_back(HandleType::eBuffer, handle.m_Index);
#if !defined(ASTER_NDEBUG)
++m_CommitedBufferCount;
#endif
return handle;
}
void
RenderResourceManager::Write(const BufferHandle handle, const usize offset, const usize size, const void *data)
{
m_BufferManager.Fetch(handle)->Write(m_Device, offset, size, data);
}
void
RenderResourceManager::EraseWrites(u32 handleIndex, HandleType handleType)
{
auto writeIter = m_Writes.begin();
auto ownerIter = m_WriteOwner.begin();
const auto ownerEnd = m_WriteOwner.end();
while (ownerIter != ownerEnd)
{
if (ownerIter->first == handleType && ownerIter->second == handleIndex)
{
*writeIter = m_Writes.back();
*ownerIter = m_WriteOwner.back();
m_Writes.pop_back();
m_WriteOwner.pop_back();
return;
}
++ownerIter;
++writeIter;
}
}
void
RenderResourceManager::Release(BufferHandle handle)
{
if (handle.IsInvalid())
return;
EraseWrites(handle.m_Index, HandleType::eBuffer);
m_BufferManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
--m_CommitedBufferCount;
#endif
}
void
RenderResourceManager::Release(StorageBuffer *storageBuffer, const BufferHandle handle)
{
assert(storageBuffer);
assert(!storageBuffer->IsValid());
StorageBuffer *internal = m_BufferManager.Fetch(handle);
*storageBuffer = *internal;
internal->m_Size_ &= ~StorageBuffer::OWNED_BIT;
Release(handle);
}
void
RenderResourceManager::Release(TextureHandle handle)
{
if (handle.IsInvalid())
return;
EraseWrites(handle.m_Index, HandleType::eTexture);
m_TextureManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
--m_CommitedTextureCount;
#endif
}
void
RenderResourceManager::Release(Texture *texture, TextureHandle handle)
{
assert(texture);
assert(!texture->IsValid());
Texture *internal = m_TextureManager.Fetch(handle);
*texture = *internal;
internal->m_Flags_ &= ~Texture::OWNED_BIT;
Release(handle);
}
TextureHandle
RenderResourceManager::CommitTexture(Texture *texture, const SamplerHandle sampler)
{
TextureHandle handle = m_TextureManager.Commit(texture);
const vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler);
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = samplerImpl,
.imageView = texture->m_View,
.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = TEXTURE_BINDING_INDEX,
.dstArrayElement = handle.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
m_WriteOwner.emplace_back(HandleType::eTexture, handle.m_Index);
#if !defined(ASTER_NDEBUG)
++m_CommitedTextureCount;
#endif
return {handle};
}
StorageTextureHandle
RenderResourceManager::CommitStorageTexture(StorageTexture *storageTexture, SamplerHandle sampler)
{
StorageTextureHandle handle = m_StorageTextureManager.Commit(storageTexture);
vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler);
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = samplerImpl,
.imageView = storageTexture->m_View,
.imageLayout = vk::ImageLayout::eGeneral,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = STORAGE_TEXTURE_BINDING_INDEX,
.dstArrayElement = handle.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageImage,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
m_WriteOwner.emplace_back(HandleType::eStorageTexture, handle.m_Index);
#if !defined(ASTER_NDEBUG)
++m_CommitedStorageTextureCount;
#endif
return {handle};
}
void
RenderResourceManager::Release(StorageTextureHandle handle)
{
if (handle.IsInvalid())
return;
EraseWrites(handle.m_Index, HandleType::eStorageTexture);
m_StorageTextureManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
--m_CommitedStorageTextureCount;
#endif
}
void
RenderResourceManager::Release(StorageTexture *texture, const StorageTextureHandle handle)
{
assert(texture);
assert(!texture->IsValid());
StorageTexture *internal = m_StorageTextureManager.Fetch(handle);
*texture = *internal;
internal->m_Flags_ &= ~StorageTexture::OWNED_BIT;
Release(handle);
}
void
RenderResourceManager::Update()
{
// Descriptor Updates
if (!m_Writes.empty())
{
m_Device->m_Device.updateDescriptorSets(Cast<u32>(m_Writes.size()), m_Writes.data(), 0, nullptr);
m_Writes.clear();
m_WriteInfos.clear();
m_WriteOwner.clear();
}
// Sub-system updates
m_Geometry.UpdateToGpu(m_Device);
}
RenderResourceManager::RenderResourceManager(Device *device, u16 maxSize, bool useBufferAddress)
: m_Device(device)
, m_UseBufferAddr(useBufferAddress)
{
vk::PhysicalDeviceProperties properties;
m_Device->m_PhysicalDevice.getProperties(&properties);
u32 buffersCount = eastl::min(properties.limits.maxPerStageDescriptorStorageBuffers - 1024, Cast<u32>(maxSize));
u32 texturesCount = eastl::min(properties.limits.maxPerStageDescriptorSampledImages - 1024, Cast<u32>(maxSize));
u32 storageTexturesCount =
eastl::min(properties.limits.maxPerStageDescriptorStorageImages - 1024, Cast<u32>(maxSize));
INFO("Max Buffer Count: {}", buffersCount);
INFO("Max Texture Count: {}", texturesCount);
INFO("Max Storage Texture Count: {}", storageTexturesCount);
m_Geometry.InitStorage(device, Megabyte(128u));
m_Index.InitIndex(device, Megabyte(8u));
m_Material.InitStorage(device, Kilobyte(560u));
m_BufferManager.Init(buffersCount);
m_TextureManager.Init(texturesCount);
m_StorageTextureManager.Init(storageTexturesCount);
m_SamplerManager.Init(storageTexturesCount);
m_DefaultSamplerCreateInfo = {
.magFilter = vk::Filter::eLinear,
.minFilter = vk::Filter::eLinear,
.mipmapMode = vk::SamplerMipmapMode::eLinear,
.addressModeU = vk::SamplerAddressMode::eRepeat,
.addressModeV = vk::SamplerAddressMode::eRepeat,
.addressModeW = vk::SamplerAddressMode::eRepeat,
.mipLodBias = 0.0f,
.anisotropyEnable = true,
.maxAnisotropy = properties.limits.maxSamplerAnisotropy,
.compareEnable = false,
.minLod = 0,
.maxLod = VK_LOD_CLAMP_NONE,
.borderColor = vk::BorderColor::eFloatOpaqueBlack,
.unnormalizedCoordinates = false,
};
m_DefaultSampler = m_SamplerManager.Fetch(m_SamplerManager.Create(device, &m_DefaultSamplerCreateInfo));
eastl::array poolSizes = {
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageBuffer,
.descriptorCount = buffersCount,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = texturesCount,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageImage,
.descriptorCount = storageTexturesCount,
},
};
const vk::DescriptorPoolCreateInfo poolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
.maxSets = 1,
.poolSizeCount = Cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));
vk::DescriptorBindingFlags bindingFlags =
vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
eastl::array layoutBindingFlags = {
bindingFlags,
bindingFlags,
bindingFlags,
};
vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
.bindingCount = Cast<u32>(layoutBindingFlags.size()),
.pBindingFlags = layoutBindingFlags.data(),
};
eastl::array descriptorLayoutBindings = {
vk::DescriptorSetLayoutBinding{
.binding = BUFFER_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.descriptorCount = Cast<u32>(buffersCount),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = TEXTURE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = Cast<u32>(texturesCount),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = STORAGE_TEXTURE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageImage,
.descriptorCount = Cast<u32>(storageTexturesCount),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
};
static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
.pNext = &bindingFlagsCreateInfo,
.flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
.bindingCount = Cast<u32>(descriptorLayoutBindings.size()),
.pBindings = descriptorLayoutBindings.data(),
};
AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));
// A single descriptor set is enough: with update-after-bind, descriptors may be rewritten at any point up to submission of the command buffer that uses them, and data referenced by pending work is retained.
// https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
// https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
.descriptorPool = m_DescriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &m_SetLayout,
};
AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));
m_Device->SetName(m_SetLayout, "Bindless Layout");
m_Device->SetName(m_DescriptorPool, "Bindless Pool");
m_Device->SetName(m_DescriptorSet, "Bindless Set");
// NOTE: This needs to be synced with the destructor manually.
// Keep the Commit() outside assert() so it still runs when asserts are compiled out.
[[maybe_unused]] const BufferHandle geometryDataHandle = Commit(m_Geometry.m_BackingBuffer.get());
assert(geometryDataHandle.m_Index == UNIFIED_GEOMETRY_DATA_HANDLE_INDEX); // Assumed fixed index to avoid extra bindings.
}
RenderResourceManager::~RenderResourceManager()
{
if (!m_Device)
return; // Moved-from instance; nothing to destroy.
// NOTE: Matches the Commit() of the unified geometry backing buffer in the constructor.
Release(BufferHandle{UNIFIED_GEOMETRY_DATA_HANDLE_INDEX});
#if !defined(ASTER_NDEBUG)
WARN_IF(m_CommitedBufferCount > 0 || m_CommitedTextureCount > 0 || m_CommitedStorageTextureCount > 0,
"Resources alive: SSBO = {}, Textures = {}, RWTexture = {}", m_CommitedBufferCount, m_CommitedTextureCount,
m_CommitedStorageTextureCount);
#endif
m_Geometry.Destroy(m_Device);
m_Index.Destroy(m_Device);
m_Material.Destroy(m_Device);
m_BufferManager.Destroy(m_Device);
m_TextureManager.Destroy(m_Device);
m_StorageTextureManager.Destroy(m_Device);
m_SamplerManager.Destroy(m_Device);
m_Device->m_Device.destroy(m_DescriptorPool, nullptr);
m_Device->m_Device.destroy(m_SetLayout, nullptr);
}
RenderResourceManager::RenderResourceManager(RenderResourceManager &&other) noexcept
: m_WriteInfos(std::move(other.m_WriteInfos))
, m_Writes(std::move(other.m_Writes))
, m_WriteOwner(std::move(other.m_WriteOwner))
, m_DefaultSampler(other.m_DefaultSampler) // Handle owned by m_SamplerManager; copy the cached value.
, m_Geometry(std::move(other.m_Geometry))
, m_Index(std::move(other.m_Index))
, m_Material(std::move(other.m_Material))
, m_BufferManager(std::move(other.m_BufferManager))
, m_TextureManager(std::move(other.m_TextureManager))
, m_StorageTextureManager(std::move(other.m_StorageTextureManager))
, m_SamplerManager(std::move(other.m_SamplerManager))
, m_Device(Take(other.m_Device))
, m_DefaultSamplerCreateInfo(other.m_DefaultSamplerCreateInfo)
, m_DescriptorPool(other.m_DescriptorPool)
, m_SetLayout(other.m_SetLayout)
, m_DescriptorSet(other.m_DescriptorSet)
, m_UseBufferAddr(other.m_UseBufferAddr)
#if !defined(ASTER_NDEBUG)
, m_CommitedBufferCount(other.m_CommitedBufferCount)
, m_CommitedTextureCount(other.m_CommitedTextureCount)
, m_CommitedStorageTextureCount(other.m_CommitedStorageTextureCount)
#endif
{
assert(!other.m_Device);
}
RenderResourceManager &
RenderResourceManager::operator=(RenderResourceManager &&other) noexcept
{
if (this == &other)
return *this;
m_WriteInfos = std::move(other.m_WriteInfos);
m_Writes = std::move(other.m_Writes);
m_WriteOwner = std::move(other.m_WriteOwner);
m_Geometry = std::move(other.m_Geometry);
m_Index = std::move(other.m_Index);
m_Material = std::move(other.m_Material);
m_BufferManager = std::move(other.m_BufferManager);
m_TextureManager = std::move(other.m_TextureManager);
m_StorageTextureManager = std::move(other.m_StorageTextureManager);
m_SamplerManager = std::move(other.m_SamplerManager);
m_DefaultSampler = other.m_DefaultSampler;
m_DefaultSamplerCreateInfo = other.m_DefaultSamplerCreateInfo;
m_Device = Take(other.m_Device); // Ensure taken.
m_DescriptorPool = other.m_DescriptorPool;
m_SetLayout = other.m_SetLayout;
m_DescriptorSet = other.m_DescriptorSet;
m_UseBufferAddr = other.m_UseBufferAddr;
#if !defined(ASTER_NDEBUG)
m_CommitedBufferCount = other.m_CommitedBufferCount;
m_CommitedTextureCount = other.m_CommitedTextureCount;
m_CommitedStorageTextureCount = other.m_CommitedStorageTextureCount;
#endif
assert(!other.m_Device);
return *this;
}
SamplerHandle
RenderResourceManager::CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo)
{
return m_SamplerManager.Create(m_Device, samplerCreateInfo);
}
GeometryHandle
RenderResourceManager::CreateGeometryBuffer(usize size, usize alignment, uptr* addr)
{
GeometryHandle handle = {m_Geometry.Create(size, alignment).m_Index};
if (addr)
{
*addr = FetchAddress(handle);
}
return handle;
}
uptr
RenderResourceManager::FetchAddress(GeometryHandle handle)
{
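// With buffer device address enabled this is a GPU address into the unified geometry buffer; otherwise it degrades to a plain byte offset from the buffer's start.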
return (m_UseBufferAddr ? m_Geometry.m_BufferPtr : 0) + m_Geometry.FetchOffset(handle);
}
void
RenderResourceManager::Write(GeometryHandle handle, usize offset, usize size, const void *data)
{
m_Geometry.Write(handle, offset, size, data);
}
void
RenderResourceManager::Release(GeometryHandle handle)
{
if (handle.IsInvalid())
return;
m_Geometry.Release(handle);
}
MaterialHandle
RenderResourceManager::CreateMaterialBuffer(usize size, usize alignment, uptr* addr)
{
MaterialHandle handle = {m_Material.Create(size, alignment).m_Index};
if (addr)
{
*addr = FetchAddress(handle);
}
return handle;
}
uptr
RenderResourceManager::FetchAddress(MaterialHandle handle)
{
return (m_UseBufferAddr ? m_Material.m_BufferPtr : 0) + m_Material.FetchOffset(handle);
}
void
RenderResourceManager::Write(MaterialHandle handle, usize offset, usize size, const void *data)
{
m_Material.Write(handle, offset, size, data);
}
void
RenderResourceManager::Release(MaterialHandle handle)
{
if (handle.IsInvalid())
return;
m_Material.Release(handle);
}
IndexHandle
RenderResourceManager::CreateIndexBuffer(usize size, usize alignment, u32 *firstIndex)
{
IndexHandle handle = {m_Index.Create(size, alignment).m_Index};
if (firstIndex)
{
*firstIndex = FetchIndex(handle);
}
return handle;
}
u32
RenderResourceManager::FetchIndex(IndexHandle handle)
{
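// Offsets in the shared index pool are byte offsets; indices are 32-bit, so dividing by sizeof(u32) yields the firstIndex value used for indexed draws.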
return Cast<u32>(m_Index.FetchOffset(handle) / sizeof(u32));
}
void
RenderResourceManager::Write(IndexHandle handle, usize offset, usize size, const void *data)
{
m_Index.Write(handle, offset, size, data);
}
void
RenderResourceManager::Release(IndexHandle handle)
{
if (handle.IsInvalid())
return;
m_Index.Release(handle);
}
vk::Buffer
RenderResourceManager::GetIndexBuffer() const
{
return m_Index.m_BackingBuffer->m_Buffer;
}
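
A minimal caller-side sketch of the geometry/index path above. UploadMesh and Vertex are illustrative placeholders and not part of this codebase; only the RenderResourceManager calls come from the implementation above.

struct Vertex { float position[4]; };

void UploadMesh(RenderResourceManager &resources, const Vertex *vertices, usize vertexCount,
                const u32 *indices, usize indexCount)
{
    // Sub-allocate from the unified geometry pool and grab the address/offset shaders will use.
    // 16-byte alignment matches the shaders' buffer_reference_align.
    uptr vertexAddr = 0;
    GeometryHandle geometry = resources.CreateGeometryBuffer(vertexCount * sizeof(Vertex), 16, &vertexAddr);
    resources.Write(geometry, 0, vertexCount * sizeof(Vertex), vertices);

    // Indices live in the shared index buffer; firstIndex feeds the indexed draw call.
    u32 firstIndex = 0;
    IndexHandle indexRange = resources.CreateIndexBuffer(indexCount * sizeof(u32), sizeof(u32), &firstIndex);
    resources.Write(indexRange, 0, indexCount * sizeof(u32), indices);

    // Apply any pending descriptor writes and pool syncs before recording draws.
    resources.Update();
}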

View File

@ -0,0 +1,250 @@
// =============================================
// Aster: render_resource_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
#include <EASTL/deque.h>
#include <EASTL/vector_map.h>
struct Device;
struct Texture;
struct StorageTexture;
struct StorageBuffer;
struct Buffer;
struct GpuResourceHandle
{
constexpr static u32 INVALID_HANDLE = MaxValue<u32>;
u32 m_Index = INVALID_HANDLE; // Default = invalid
[[nodiscard]] bool
IsInvalid() const
{
return m_Index == INVALID_HANDLE;
}
};
struct BufferHandle : GpuResourceHandle
{
};
struct TextureHandle : GpuResourceHandle
{
};
struct StorageTextureHandle : GpuResourceHandle
{
};
struct SamplerHandle : GpuResourceHandle
{
};
struct VirtualizedBufferHandle : GpuResourceHandle
{
};
struct GeometryHandle : VirtualizedBufferHandle
{
};
struct IndexHandle : VirtualizedBufferHandle
{
};
struct MaterialHandle : VirtualizedBufferHandle
{
};
struct TextureManager
{
eastl::vector<Texture> m_Textures;
u32 m_MaxCapacity;
u32 m_FreeHead;
void Init(u32 maxCapacity);
TextureHandle Commit(Texture *texture);
Texture *Fetch(TextureHandle handle);
void Release(const Device *device, TextureHandle handle);
void Destroy(const Device *device);
};
struct BufferManager
{
eastl::vector<StorageBuffer> m_Buffers;
u32 m_MaxCapacity;
u32 m_FreeHead;
void Init(u32 maxCapacity);
BufferHandle Commit(StorageBuffer *buffer);
StorageBuffer *Fetch(BufferHandle handle);
void Release(const Device *device, BufferHandle handle);
void Destroy(const Device *device);
};
struct StorageTextureManager : TextureManager
{
StorageTextureHandle Commit(StorageTexture *texture);
StorageTexture *Fetch(StorageTextureHandle handle);
void Release(const Device *device, StorageTextureHandle handle);
};
struct SamplerManager
{
// Vulkan caps sampler objects (maxSamplerAllocationCount), so samplers are created sparingly and kept alive for the manager's lifetime.
eastl::vector<vk::Sampler> m_Samplers;
eastl::vector<usize> m_SamplerHashes;
void Init(usize size);
SamplerHandle Create(const Device *device, const vk::SamplerCreateInfo *createInfo);
vk::Sampler Fetch(SamplerHandle handle);
void Destroy(const Device *device);
};
struct VirtualizedBufferPool
{
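// One large backing StorageBuffer is sub-allocated through VMA's virtual allocator,
// so each handle maps to an (offset, size) range rather than a separate VkBuffer.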
// TODO: Use buffer device address
std::unique_ptr<StorageBuffer> m_BackingBuffer;
uptr m_BufferPtr;
VmaVirtualBlock m_Block;
struct VirtualBuffer
{
VmaVirtualAllocation m_Allocation;
usize m_Offset;
usize m_Size;
};
eastl::vector<VirtualBuffer> m_VirtualBuffers;
u32 m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
bool m_Dirty = false;
void InitStorage(const Device *device, usize bufferMaxSize);
void InitIndex(const Device *device, usize bufferMaxSize);
// Syncs the offset buffer if required.
// FUTURE(Bob): Handle the writes for non-ReBAR systems.
void UpdateToGpu(const Device *device);
VirtualizedBufferHandle Create(usize size, usize alignment);
usize FetchOffset(VirtualizedBufferHandle handle);
void Release(VirtualizedBufferHandle handle);
void Write(VirtualizedBufferHandle handle, usize offset, usize size, const void *data);
void Destroy(const Device *device);
};
struct RenderResourceManager
{
private:
union WriteInfo {
vk::DescriptorBufferInfo uBufferInfo;
vk::DescriptorImageInfo uImageInfo;
vk::BufferView uBufferView;
WriteInfo()
{
}
explicit WriteInfo(vk::DescriptorBufferInfo info);
explicit WriteInfo(vk::DescriptorImageInfo info);
explicit WriteInfo(vk::BufferView info);
};
enum class HandleType
{
eBuffer,
eTexture,
eStorageTexture,
};
using WriteOwner = eastl::pair<HandleType, u32>;
eastl::deque<WriteInfo> m_WriteInfos;
eastl::vector<vk::WriteDescriptorSet> m_Writes;
eastl::vector<WriteOwner> m_WriteOwner;
vk::Sampler m_DefaultSampler;
VirtualizedBufferPool m_Geometry;
VirtualizedBufferPool m_Index;
VirtualizedBufferPool m_Material;
BufferManager m_BufferManager;
TextureManager m_TextureManager;
StorageTextureManager m_StorageTextureManager;
SamplerManager m_SamplerManager;
void EraseWrites(u32 handleIndex, HandleType handleType);
public:
Device *m_Device;
constexpr static u32 BUFFER_BINDING_INDEX = 0;
constexpr static u32 TEXTURE_BINDING_INDEX = 1;
constexpr static u32 STORAGE_TEXTURE_BINDING_INDEX = 2;
constexpr static u32 UNIFIED_GEOMETRY_DATA_HANDLE_INDEX = 0;
constexpr static u32 UNIFIED_GEOMETRY_OFFSET_HANDLE_INDEX = 1;
constexpr static u32 MATERIAL_HANDLE_INDEX = 2;
vk::SamplerCreateInfo m_DefaultSamplerCreateInfo;
vk::DescriptorPool m_DescriptorPool;
vk::DescriptorSetLayout m_SetLayout;
vk::DescriptorSet m_DescriptorSet;
bool m_UseBufferAddr;
BufferHandle Commit(StorageBuffer *storageBuffer); // Commit to GPU and take Ownership
void Write(BufferHandle handle, usize offset, usize size, const void *data); // Write to buffer
void Release(BufferHandle handle); // Release and Destroy
void Release(StorageBuffer *storageBuffer, BufferHandle handle); // Release and Return
TextureHandle CommitTexture(Texture *texture, SamplerHandle sampler = {}); // Commit to GPU and take Ownership
void Release(TextureHandle handle); // Release and Destroy
void Release(Texture *texture, TextureHandle handle); // Release and Return
StorageTextureHandle CommitStorageTexture(StorageTexture *storageTexture,
SamplerHandle sampler = {}); // Commit to GPU and take Ownership
void Release(StorageTextureHandle handle); // Release and Destroy
void Release(StorageTexture *texture, StorageTextureHandle handle); // Release and Return
SamplerHandle CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo);
GeometryHandle CreateGeometryBuffer(usize size, usize alignment, uptr *addr = nullptr);
uptr FetchAddress(GeometryHandle handle);
void Write(GeometryHandle handle, usize offset, usize size, const void *data);
void Release(GeometryHandle handle);
MaterialHandle CreateMaterialBuffer(usize size, usize alignment, uptr *addr = nullptr);
uptr FetchAddress(MaterialHandle handle);
void Write(MaterialHandle handle, usize offset, usize size, const void *data);
void Release(MaterialHandle handle);
IndexHandle CreateIndexBuffer(usize size, usize alignment, u32 *firstIndex = nullptr);
u32 FetchIndex(IndexHandle handle);
void Write(IndexHandle handle, usize offset, usize size, const void *data);
void Release(IndexHandle handle);
vk::Buffer GetIndexBuffer() const;
void Update(); // Update all the descriptors required.
// Ctor/Dtor
RenderResourceManager(Device *device, u16 maxSize, bool useBufferAddress = true);
~RenderResourceManager();
RenderResourceManager(RenderResourceManager &&other) noexcept;
RenderResourceManager &operator=(RenderResourceManager &&other) noexcept;
#if !defined(ASTER_NDEBUG)
usize m_CommitedBufferCount = 0;
usize m_CommitedTextureCount = 0;
usize m_CommitedStorageTextureCount = 0;
#endif
DISALLOW_COPY_AND_ASSIGN(RenderResourceManager);
};
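
A sketch of the commit/release ownership contract described by the comments above. LoadTexture is a hypothetical helper, not part of this header; only the RenderResourceManager members are real.

void TextureLifetimeExample(RenderResourceManager &resources, Device *device)
{
    Texture texture = LoadTexture(device, "albedo.png"); // hypothetical helper

    // CommitTexture hands ownership to the manager and queues a descriptor write;
    // the returned index is what shaders use to address the bindless texture array.
    TextureHandle handle = resources.CommitTexture(&texture); // default sampler when none is passed
    resources.Update();

    // ... record draws that sample the bindless texture at handle.m_Index ...

    // Release(handle) destroys the texture; the (Texture *, handle) overload instead
    // returns ownership to the caller without destroying the GPU resource.
    resources.Release(handle);
}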

View File

@ -0,0 +1,18 @@
#version 450
#pragma shader_stage(fragment)
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
#extension GL_EXT_buffer_reference : require
layout (location = 2) in vec4 inColor;
layout (location = 0) out vec4 outColor;
layout(push_constant) uniform Constants {
mat4 globalTransform;
uint64_t vertexPos;
uint64_t vertexDat;
uint64_t materialIdx;
} pcb;
void main() {
outColor = vec4(inColor.rgb, 1.0f);
}

View File

@ -0,0 +1,56 @@
#version 450
#pragma shader_stage(vertex)
#extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
#extension GL_EXT_buffer_reference : require
layout(location=0) out vec4 outWorldNormal;
layout(location=1) out vec4 outWorldPosition;
layout(location=2) out vec4 outColor;
layout(location=3) out vec2 outUV0;
struct VertexData {
vec4 Normal;
vec2 TexCoord0;
vec2 TexCoord1;
vec4 Color;
};
layout(std430, buffer_reference, buffer_reference_align=16) readonly buffer VPositionRef {
vec4 Positions[];
};
layout(std430, buffer_reference, buffer_reference_align=16) readonly buffer VDataRef {
VertexData Data[];
};
layout(set=1, binding=0) uniform Camera {
mat4 View; // 64
mat4 Projection; // 128
mat4 InvView; // 192
mat4 InvProjection; // 256
vec4 Position; // 272
} camera;
layout(push_constant) uniform Constants {
mat4 globalTransform;
VPositionRef vertexPos;
VDataRef vertexDat;
uint64_t materialIdx;
} pcb;
void main() {
vec3 colors[] = {
vec3( 1.0f, 0.0f, 0.0f ),
vec3( 0.0f, 1.0f, 0.0f ),
vec3( 0.0f, 0.0f, 1.0f ),
};
gl_Position = camera.Projection * camera.View * pcb.globalTransform * vec4(pcb.vertexPos.Positions[gl_VertexIndex].xyz, 1.0f);
outColor = vec4(pcb.vertexDat.Data[gl_VertexIndex].Color.rgb, 1.0f); //vec3(colors[gl_VertexIndex % 3]);
// TODO
// layout(location=0) out vec4 outWorldNormal;
// layout(location=1) out vec4 outWorldPosition;
// layout(location=2) out vec4 outColor;
// layout(location=3) out vec2 outUV0;
}
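
A possible host-side mirror of the push_constant block used by these shaders, filled from RenderResourceManager::FetchAddress. The struct name and the pushConstants call are illustrative; the field packing follows the GLSL layout (64-byte mat4 followed by three 8-byte values, 88 bytes total).

struct DrawPushConstants
{
    float globalTransform[16]; // column-major mat4
    uint64_t vertexPos;        // FetchAddress(GeometryHandle) for the position range
    uint64_t vertexDat;        // FetchAddress(GeometryHandle) for the VertexData range
    uint64_t materialIdx;      // FetchAddress(MaterialHandle) or a material index
};
static_assert(sizeof(DrawPushConstants) == 88);

// cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eAll, 0, sizeof(DrawPushConstants), &pc);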

View File

@ -0,0 +1,62 @@
struct VS_Input
{
uint VertexIndex : SV_VertexID;
};
struct VS_Out {
float4 Position : SV_Position; // The vertex shader must export SV_Position.
float4 WorldNormal : NORMAL;
float4 WorldPosition : POSITION;
float4 Color : COLOR0;
float2 TexCoord0 : TEXCOORD0;
};
struct CameraData
{
float4x4 View; // 64
float4x4 Projection; // 128
float4x4 InvView; // 192
float4x4 InvProjection; // 256
float4 Position; // 272
};
[[vk::binding(0, 1)]] ConstantBuffer<CameraData> Camera;
struct VertexData {
float4 Normal;
float2 TexCoord0;
float2 TexCoord1;
float4 Color;
};
[[vk::binding(0, 0)]] ByteAddressBuffer GeometryBuffer[];
// Push constants mirror the GLSL shaders: a global transform plus 64-bit
// addresses/offsets for the vertex position and vertex data ranges.
struct Constants
{
float4x4 GlobalTransform;
uint64_t VertexPos;
uint64_t VertexDat;
uint64_t MaterialIdx;
};
[[vk::push_constant]] Constants pcb;
VS_Out main(VS_Input StageInput)
{
VS_Out Output;
// WIP: VertexPos is treated as a byte offset into the unified geometry buffer
// until buffer-device-address loads are wired up on the HLSL path.
uint positionOffset = (uint)pcb.VertexPos + StageInput.VertexIndex * 16u; // 16 = sizeof(float4)
float4 LocalPosition = GeometryBuffer[0].Load<float4>(positionOffset);
float4 GlobalPosition = mul(pcb.GlobalTransform, LocalPosition);
float4 ViewSpace = mul(Camera.View, GlobalPosition);
Output.Position = mul(Camera.Projection, ViewSpace);
Output.WorldPosition = GlobalPosition;
// WIP: normal, UV and color fetches from VertexData are not hooked up yet.
Output.WorldNormal = float4(0.0f, 0.0f, 1.0f, 0.0f);
Output.Color = float4(1.0f, 1.0f, 1.0f, 1.0f);
Output.TexCoord0 = float2(0.0f, 0.0f);
return Output;
}

View File

@ -6,3 +6,4 @@ add_subdirectory("00_util")
add_subdirectory("01_triangle") add_subdirectory("01_triangle")
add_subdirectory("02_box") add_subdirectory("02_box")
add_subdirectory("03_model_render") add_subdirectory("03_model_render")
add_subdirectory("04_scenes")

View File

@ -7,13 +7,14 @@
{ {
"name": "imgui", "name": "imgui",
"features": [ "features": [
"docking-experimental",
"glfw-binding", "glfw-binding",
"vulkan-binding", "vulkan-binding"
"docking-experimental"
] ]
}, },
"scottt-debugbreak", "scottt-debugbreak",
"tinygltf", "tinygltf",
"vulkan-memory-allocator" "vulkan-memory-allocator",
"entt"
] ]
} }