project-aster/samples/03_model_render/asset_loader.cpp

// =============================================
// Aster: asset_loader.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#define TINYGLTF_NOEXCEPTION
#define JSON_NOEXCEPTION
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "asset_loader.h"
#include "EASTL/fixed_vector.h"
#include "buffer.h"
#include "device.h"
#include "gpu_resource_manager.h"
#include "helpers.h"
#include "image.h"
#include <EASTL/hash_map.h>
#include <glm/gtc/type_ptr.hpp>
#include <tiny_gltf.h>
#if defined(LoadImage)
#undef LoadImage
#endif
constexpr vk::CommandBufferBeginInfo OneTimeCmdBeginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
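// Helpers to convert tinygltf's double-precision parameter arrays into glm vectors,
// falling back to zero when the glTF file omits the property.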
vec4
VectorToVec4(const std::vector<double> &vec)
{
if (vec.empty())
{
return vec4{0.0f};
}
assert(vec.size() == 4);
return {Cast<f32>(vec[0]), Cast<f32>(vec[1]), Cast<f32>(vec[2]), Cast<f32>(vec[3])};
}
vec3
VectorToVec3(const std::vector<double> &vec)
{
if (vec.empty())
{
return vec3{0.0f};
}
assert(vec.size() == 3);
return {Cast<f32>(vec[0]), Cast<f32>(vec[1]), Cast<f32>(vec[2])};
}
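// Loads an HDR (.hdr) image with stb_image, uploads it through a staging buffer into an
// RGBA32F texture, transitions it for shader reads, and blocks until the transfer completes.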
void
AssetLoader::LoadHdrImage(Texture *texture, cstr path, cstr name) const
{
const Device *pDevice = m_ResourceManager->m_Device;
ERROR_IF(texture->IsValid(), "Expected invalid image.") THEN_ABORT(-1);
i32 x, y, nChannels;
f32 *data = stbi_loadf(path, &x, &y, &nChannels, 4);
assert(nChannels == 3);
ERROR_IF(!data, "Could not load {}", path) THEN_ABORT(-1);
u32 width = Cast<u32>(x);
u32 height = Cast<u32>(y);
StagingBuffer stagingBuffer;
texture->Init(m_ResourceManager->m_Device, {width, height}, vk::Format::eR32G32B32A32Sfloat, false, path);
assert(texture->IsValid());
stagingBuffer.Init(m_ResourceManager->m_Device, (sizeof *data) * x * y * 4, "HDR Staging Buffer");
stagingBuffer.Write(m_ResourceManager->m_Device, 0, stagingBuffer.GetSize(), data);
stbi_image_free(data);
#pragma region Setup Copy/Sync primitives
vk::BufferImageCopy2 copyRegion = {
.bufferOffset = 0,
.bufferRowLength = width,
.bufferImageHeight = height,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {0, 0, 0},
.imageExtent = texture->m_Extent,
};
vk::CopyBufferToImageInfo2 stagingInfo = {
.srcBuffer = stagingBuffer.m_Buffer,
.dstImage = texture->m_Image,
.dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
.regionCount = 1,
.pRegions = &copyRegion,
};
vk::ImageMemoryBarrier2 readyToStageBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eNone,
.dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
vk::DependencyInfo readyToStageDependency = {
.memoryBarrierCount = 0,
.bufferMemoryBarrierCount = 0,
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &readyToStageBarrier,
};
vk::ImageMemoryBarrier2 postStagingBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eFragmentShader | vk::PipelineStageFlagBits2::eComputeShader,
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
.srcQueueFamilyIndex = m_TransferQueueIndex,
.dstQueueFamilyIndex = m_GraphicsQueueIndex,
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
vk::DependencyInfo postStagingDependency = {
.memoryBarrierCount = 0,
.bufferMemoryBarrierCount = 0,
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &postStagingBarrier,
};
#pragma endregion
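// Record the upload on the transfer-queue command buffer, submit it, and block on a fence
// until the copy has finished before freeing the staging memory.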
AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo));
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += name ? name : path;
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
m_CommandBuffer.pipelineBarrier2(&readyToStageDependency);
m_CommandBuffer.copyBufferToImage2(&stagingInfo);
m_CommandBuffer.pipelineBarrier2(&postStagingDependency);
#if !defined(ASTER_NDEBUG)
m_CommandBuffer.endDebugUtilsLabelEXT();
#endif
AbortIfFailed(m_CommandBuffer.end());
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &m_CommandBuffer,
};
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u64>));
pDevice->m_Device.destroy(fence, nullptr);
AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {}));
stagingBuffer.Destroy(pDevice);
}
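// Generates the full mip chain for `texture` by repeatedly blitting each level into the next
// at half resolution, inserting the barriers needed between levels, and leaving every level in
// `finalLayout` visible to `finalStage`.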
void
GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayout initialLayout,
vk::ImageLayout finalLayout, vk::PipelineStageFlags2 prevStage, vk::PipelineStageFlags2 finalStage)
{
#if !defined(ASTER_NDEBUG)
vk::DebugUtilsLabelEXT label = {
.pLabelName = "Generate Mipmap",
.color = std::array{0.9f, 0.9f, 0.9f, 1.0f},
};
commandBuffer.beginDebugUtilsLabelEXT(&label);
#endif
vk::ImageMemoryBarrier2 imageStartBarrier = {
.srcStageMask = prevStage,
.srcAccessMask = vk::AccessFlagBits2::eNone,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.oldLayout = initialLayout,
.newLayout = vk::ImageLayout::eTransferSrcOptimal,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = texture->m_LayerCount,
},
};
vk::ImageMemoryBarrier2 mipsStartBarrier = imageStartBarrier;
mipsStartBarrier.dstAccessMask = vk::AccessFlagBits2::eTransferWrite;
mipsStartBarrier.oldLayout = vk::ImageLayout::eUndefined;
mipsStartBarrier.newLayout = vk::ImageLayout::eTransferDstOptimal;
mipsStartBarrier.subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 1,
.levelCount = texture->GetMipLevels() - 1,
.baseArrayLayer = 0,
.layerCount = texture->m_LayerCount,
};
eastl::fixed_vector<vk::ImageMemoryBarrier2, 2> startBarriers = {
mipsStartBarrier,
};
if (initialLayout != imageStartBarrier.newLayout)
{
startBarriers.push_back(imageStartBarrier);
}
vk::DependencyInfo imageStartDependency = {
.imageMemoryBarrierCount = Cast<u32>(startBarriers.size()),
.pImageMemoryBarriers = startBarriers.data(),
};
vk::ImageMemoryBarrier2 nextMipBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eTransferSrcOptimal,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = texture->m_LayerCount,
},
};
vk::DependencyInfo interMipDependency = {
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &nextMipBarrier,
};
vk::ImageMemoryBarrier2 imageReadyBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = finalStage,
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.oldLayout = vk::ImageLayout::eTransferSrcOptimal,
.newLayout = finalLayout,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = texture->GetMipLevels(),
.baseArrayLayer = 0,
.layerCount = texture->m_LayerCount,
},
};
vk::DependencyInfo imageReadyDependency = {
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &imageReadyBarrier,
};
vk::ImageBlit2 blitRegion = {
.srcSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseArrayLayer = 0,
.layerCount = texture->m_LayerCount,
},
.dstSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseArrayLayer = 0,
.layerCount = texture->m_LayerCount,
},
};
vk::BlitImageInfo2 mipBlitInfo = {
.srcImage = texture->m_Image,
.srcImageLayout = vk::ImageLayout::eTransferSrcOptimal,
.dstImage = texture->m_Image,
.dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
.regionCount = 1,
.pRegions = &blitRegion,
.filter = vk::Filter::eLinear,
};
auto calcNextMip = [](i32 prev) { return eastl::max(prev / 2, 1); };
// Mip Mapping
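// Each iteration blits the previous level into the current one, then transitions the freshly
// written level to TransferSrcOptimal so it can serve as the source for the next blit.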
commandBuffer.pipelineBarrier2(&imageStartDependency);
i32 prevMipWidth = Cast<i32>(texture->m_Extent.width);
i32 prevMipHeight = Cast<i32>(texture->m_Extent.height);
u32 maxPrevMip = texture->GetMipLevels() - 1;
for (u32 prevMipLevel = 0; prevMipLevel < maxPrevMip; ++prevMipLevel)
{
i32 currentMipWidth = calcNextMip(prevMipWidth);
i32 currentMipHeight = calcNextMip(prevMipHeight);
u32 currentMipLevel = prevMipLevel + 1;
blitRegion.srcSubresource.mipLevel = prevMipLevel;
blitRegion.srcOffsets = std::array{
vk::Offset3D{0, 0, 0},
vk::Offset3D{prevMipWidth, prevMipHeight, 1},
};
blitRegion.dstSubresource.mipLevel = currentMipLevel;
blitRegion.dstOffsets = std::array{
vk::Offset3D{0, 0, 0},
vk::Offset3D{currentMipWidth, currentMipHeight, 1},
};
nextMipBarrier.subresourceRange.baseMipLevel = currentMipLevel;
commandBuffer.blitImage2(&mipBlitInfo);
commandBuffer.pipelineBarrier2(&interMipDependency);
prevMipHeight = currentMipHeight;
prevMipWidth = currentMipWidth;
}
commandBuffer.pipelineBarrier2(&imageReadyDependency);
#if !defined(ASTER_NDEBUG)
commandBuffer.endDebugUtilsLabelEXT();
#endif
}
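// Uploads a single glTF image (expected to be RGBA8, as asserted below) through `stagingBuffer`,
// generates its mip chain, and registers the resulting texture with the resource manager.
// The staging buffer must stay alive until the recorded commands have been submitted and waited on.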
TextureHandle
AssetLoader::LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image, bool isSrgb) const
{
assert(image->component == 4);
assert(image->height > 0 && image->width > 0);
u32 height = Cast<u32>(image->height);
u32 width = Cast<u32>(image->width);
vk::Format imageFormat = isSrgb ? vk::Format::eR8G8B8A8Srgb : vk::Format::eR8G8B8A8Unorm;
Texture texture;
usize byteSize = image->image.size();
texture.Init(m_ResourceManager->m_Device, {.width = width, .height = height}, imageFormat, true,
image->name.data());
stagingBuffer->Init(m_ResourceManager->m_Device, byteSize);
stagingBuffer->Write(m_ResourceManager->m_Device, 0, byteSize, image->image.data());
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += image->name.empty() ? "<texture>" : image->name.c_str();
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
#pragma region Barriers and Blits
vk::ImageMemoryBarrier2 imageStartBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe,
.srcAccessMask = vk::AccessFlagBits2::eNone,
.dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture.m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
vk::DependencyInfo imageStartDependency = {
.memoryBarrierCount = 0,
.bufferMemoryBarrierCount = 0,
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &imageStartBarrier,
};
vk::ImageMemoryBarrier2 postStagingBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eTransferSrcOptimal,
.srcQueueFamilyIndex = m_TransferQueueIndex,
.dstQueueFamilyIndex = m_GraphicsQueueIndex,
.image = texture.m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
vk::DependencyInfo postStagingDependency = {
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &postStagingBarrier,
};
vk::BufferImageCopy2 imageCopy = {
.bufferOffset = 0,
.bufferRowLength = Cast<u32>(image->width),
.bufferImageHeight = Cast<u32>(image->height),
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = texture.m_Extent,
};
vk::CopyBufferToImageInfo2 stagingCopyInfo = {
.srcBuffer = stagingBuffer->m_Buffer,
.dstImage = texture.m_Image,
.dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
.regionCount = 1,
.pRegions = &imageCopy,
};
#pragma endregion
m_CommandBuffer.pipelineBarrier2(&imageStartDependency);
m_CommandBuffer.copyBufferToImage2(&stagingCopyInfo);
m_CommandBuffer.pipelineBarrier2(&postStagingDependency);
GenerateMipMaps(m_CommandBuffer, &texture, vk::ImageLayout::eTransferSrcOptimal,
vk::ImageLayout::eShaderReadOnlyOptimal, vk::PipelineStageFlagBits2::eAllTransfer,
vk::PipelineStageFlagBits2::eFragmentShader);
#if !defined(ASTER_NDEBUG)
m_CommandBuffer.endDebugUtilsLabelEXT();
#endif
return m_ResourceManager->CommitTexture(&texture);
}
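// Loads a glTF/GLB file from `path`, uploads its images, materials, geometry and node transforms
// to the GPU in a single transfer-queue submission, and returns a Model referencing the committed
// resources.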
Model
AssetLoader::LoadModelToGpu(cstr path, cstr name)
{
namespace fs = std::filesystem;
tinygltf::Model model;
tinygltf::TinyGLTF loader;
const Device *pDevice = m_ResourceManager->m_Device;
const auto fsPath = fs::absolute(path);
const auto ext = fsPath.extension();
if (ext == GLTF_ASCII_FILE_EXTENSION)
{
std::string err;
std::string warn;
const bool loaded = loader.LoadASCIIFromFile(&model, &err, &warn, fsPath.generic_string());
ERROR_IF(!err.empty(), "{}", err)
ELSE_IF_WARN(!warn.empty(), "{}", warn);
ERROR_IF(!loaded, "Could not load {}", path) THEN_ABORT(-1);
}
else if (ext == GLTF_BINARY_FILE_EXTENSION)
{
std::string err;
std::string warn;
const bool loaded = loader.LoadBinaryFromFile(&model, &err, &warn, fsPath.generic_string());
ERROR_IF(!err.empty(), "{}", err)
ELSE_IF_WARN(!warn.empty(), "{}", warn);
ERROR_IF(!loaded, "Could not load {}", path) THEN_ABORT(-1);
}
AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo));
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += name ? name : path;
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
eastl::vector<StagingBuffer> stagingBuffers;
eastl::hash_map<i32, TextureHandle> textureHandleMap;
eastl::vector<Material> materials;
StorageBuffer materialsBuffer;
BufferHandle materialsHandle;
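// Build the material table: resolve every texture referenced by the glTF materials (uploading
// each image at most once via textureHandleMap) and stage the packed Material array into a
// storage buffer.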
if (!model.materials.empty())
{
auto getTextureHandle = [this, &textureHandleMap, &stagingBuffers, &model](i32 index,
bool isSrgb) -> TextureHandle {
if (index < 0)
{
return {};
}
const auto iter = textureHandleMap.find(index);
if (iter != textureHandleMap.end())
{
return iter->second;
}
auto *image = &model.images[index];
TextureHandle handle = LoadImageToGpu(&stagingBuffers.push_back(), image, isSrgb);
textureHandleMap.emplace(index, handle);
return handle;
};
materials.reserve(model.materials.size());
for (auto &material : model.materials)
{
materials.push_back({
.m_AlbedoFactor = VectorToVec4(material.pbrMetallicRoughness.baseColorFactor),
.m_EmissionFactor = VectorToVec3(material.emissiveFactor),
.m_MetalFactor = Cast<f32>(material.pbrMetallicRoughness.metallicFactor),
.m_RoughFactor = Cast<f32>(material.pbrMetallicRoughness.roughnessFactor),
.m_AlbedoTex = getTextureHandle(material.pbrMetallicRoughness.baseColorTexture.index, true),
.m_NormalTex = getTextureHandle(material.normalTexture.index, false),
.m_MetalRoughTex =
getTextureHandle(material.pbrMetallicRoughness.metallicRoughnessTexture.index, false),
.m_OcclusionTex = getTextureHandle(material.occlusionTexture.index, false),
.m_EmissionTex = getTextureHandle(material.emissiveTexture.index, true),
});
}
usize materialsByteSize = materials.size() * sizeof materials[0];
materialsBuffer.Init(pDevice, materialsByteSize, false, name);
materialsHandle = m_ResourceManager->Commit(&materialsBuffer);
StagingBuffer &materialStaging = stagingBuffers.push_back();
materialStaging.Init(pDevice, materialsByteSize);
materialStaging.Write(pDevice, 0, materialsByteSize, materials.data());
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = materialsByteSize};
m_CommandBuffer.copyBuffer(materialStaging.m_Buffer, materialsBuffer.m_Buffer, 1, &bufferCopy);
}
// TODO: Mesh reordering based on nodes AND OR meshoptimizer
// TODO: Support scenes
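// All primitives of all meshes are flattened into shared position/attribute/index arrays; each
// MeshPrimitive records its offsets into those arrays plus its material and (later) transform index.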
eastl::vector<vec4> vertexPositions;
eastl::vector<VertexData> vertexData;
eastl::vector<u32> indices;
eastl::vector<MeshPrimitive> meshPrimitives;
meshPrimitives.reserve(model.meshes.size());
// Offset, Count
eastl::vector<eastl::pair<usize, usize>> meshPrimRanges;
meshPrimRanges.reserve(model.meshes.size());
u32 vertexOffset = 0;
u32 indexOffset = 0;
for (auto &mesh : model.meshes)
{
meshPrimRanges.emplace_back(meshPrimitives.size(), mesh.primitives.size());
for (auto &prim : mesh.primitives)
{
u32 vertexCount = 0;
u32 indexCount = 0;
#pragma region Position
assert(prim.attributes.contains(APosition));
assert(prim.mode == TINYGLTF_MODE_TRIANGLES);
{
tinygltf::Accessor *posAccessor = &model.accessors[prim.attributes[APosition]];
assert(posAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *posBufferView = &model.bufferViews[posAccessor->bufferView];
tinygltf::Buffer *posBuffer = &model.buffers[posBufferView->buffer];
usize byteOffset = (posAccessor->byteOffset + posBufferView->byteOffset);
vertexCount = Cast<u32>(posAccessor->count);
vertexPositions.reserve(vertexOffset + vertexCount);
if (posAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(posBuffer->data.data() + byteOffset);
vertexPositions.insert(vertexPositions.end(), data, data + vertexCount);
}
else if (posAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(posBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
vertexPositions.push_back(vec4(data[i], 1.0f));
}
}
else if (posAccessor->type == TINYGLTF_TYPE_VEC2)
{
vec2 *data = Recast<vec2 *>(posBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
vertexPositions.push_back(vec4(data[i], 0.0f, 1.0f));
}
}
}
#pragma endregion
#pragma region Vertex Data
vertexData.resize(vertexPositions.size());
// Normal Coords
if (prim.attributes.contains(ANormal))
{
tinygltf::Accessor *normAccessor = &model.accessors[prim.attributes[ANormal]];
assert(normAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *normBufferView = &model.bufferViews[normAccessor->bufferView];
tinygltf::Buffer *normBuffer = &model.buffers[normBufferView->buffer];
usize byteOffset = (normAccessor->byteOffset + normBufferView->byteOffset);
if (normAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(normBuffer->data.data() + byteOffset);
vec4 *end = data + vertexCount;
u32 idx = vertexOffset;
vec4 *it = data;
while (it != end)
{
vertexData[idx++].m_Normal = *(it++);
}
}
else if (normAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(normBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto norm = vec4(data[i], 0.0f);
vertexData[vertexOffset + i].m_Normal = norm;
}
}
else if (normAccessor->type == TINYGLTF_TYPE_VEC2)
{
vec2 *data = Recast<vec2 *>(normBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto norm = vec4(data[i], 0.0f, 0.0f);
vertexData[vertexOffset + i].m_Normal = norm;
}
}
}
// UV0
if (prim.attributes.contains(ATexCoord0))
{
tinygltf::Accessor *uvAccessor = &model.accessors[prim.attributes[ATexCoord0]];
assert(uvAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *uvBufferView = &model.bufferViews[uvAccessor->bufferView];
tinygltf::Buffer *uvBuffer = &model.buffers[uvBufferView->buffer];
usize byteOffset = (uvAccessor->byteOffset + uvBufferView->byteOffset);
assert(uvAccessor->type == TINYGLTF_TYPE_VEC2 &&
uvAccessor->componentType == TINYGLTF_COMPONENT_TYPE_FLOAT);
{
vec2 *data = Recast<vec2 *>(uvBuffer->data.data() + byteOffset);
vec2 *end = data + vertexCount;
u32 idx = vertexOffset;
vec2 *it = data;
while (it != end)
{
vertexData[idx++].m_TexCoord0 = *(it++);
}
}
}
if (prim.attributes.contains(AColor0))
{
tinygltf::Accessor *colorAccessor = &model.accessors[prim.attributes[AColor0]];
assert(colorAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *colorBufferView = &model.bufferViews[colorAccessor->bufferView];
tinygltf::Buffer *colorBuffer = &model.buffers[colorBufferView->buffer];
usize byteOffset = (colorAccessor->byteOffset + colorBufferView->byteOffset);
if (colorAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(colorBuffer->data.data() + byteOffset);
vec4 *end = data + vertexCount;
u32 idx = vertexOffset;
vec4 *it = data;
while (it != end)
{
vertexData[idx++].m_Color0 = *(it++);
}
}
else if (colorAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(colorBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto color = vec4(data[i], 1.0f);
vertexData[vertexOffset + i].m_Color0 = color;
}
}
}
#pragma endregion
#pragma region Indices
// Indices
if (prim.indices >= 0)
{
tinygltf::Accessor *indexAccessor = &model.accessors[prim.indices];
assert(indexAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *indexBufferView = &model.bufferViews[indexAccessor->bufferView];
tinygltf::Buffer *indexBuffer = &model.buffers[indexBufferView->buffer];
usize byteOffset = (indexAccessor->byteOffset + indexBufferView->byteOffset);
indexCount = Cast<u32>(indexAccessor->count);
indices.reserve(indexOffset + indexCount);
if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT)
{
u32 *data = Recast<u32 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT)
{
u16 *data = Recast<u16 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE)
{
u8 *data = Recast<u8 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
}
else
{
indexCount = vertexCount;
indices.reserve(indexOffset + vertexCount);
for (u32 i = 0; i < indexCount; ++i)
{
indices.push_back(i);
}
}
#pragma endregion
meshPrimitives.push_back({
.m_VertexOffset = vertexOffset,
.m_FirstIndex = indexOffset,
.m_IndexCount = indexCount,
.m_MaterialIdx = prim.material,
.m_TransformIdx = -1,
});
vertexOffset += vertexCount;
indexOffset += indexCount;
}
}
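// Flatten the default scene's node hierarchy: node 0 is an identity root, every glTF node gets its
// local TRS/matrix composed and appended, and mesh primitives are pointed at their node's transform.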
Nodes nodes;
nodes.Add(mat4{1.0f}, -1);
{
if (model.defaultScene >= 0)
{
eastl::function<void(i32, i32)> processNode = [&processNode, &model, &nodes, &meshPrimRanges,
&meshPrimitives](i32 idx, i32 parent) -> void {
const auto *node = &model.nodes[idx];
vec3 nodeTranslation = vec3{0.0f};
quat nodeRotation = quat{1.0f, 0.0f, 0.0f, 0.0f};
vec3 nodeScale = vec3{1.0f};
mat4 nodeMatrix = mat4{1.0f};
if (node->translation.size() == 3)
{
nodeTranslation = glm::make_vec3(node->translation.data());
}
if (node->rotation.size() == 4)
{
nodeRotation = glm::make_quat(node->rotation.data());
}
if (node->scale.size() == 3)
{
// We don't handle the scale 0 special case yet.
nodeScale = glm::make_vec3(node->scale.data());
}
if (node->matrix.size() == 16)
{
nodeMatrix = glm::make_mat4(node->matrix.data());
}
const mat4 transform = translate(mat4(1.0f), nodeTranslation) * mat4_cast(nodeRotation) *
scale(mat4(1.0f), nodeScale) * nodeMatrix;
const i32 nodeArrayIndex = Cast<i32>(nodes.Add(transform, parent));
if (node->mesh >= 0)
{
auto [start, count] = meshPrimRanges[node->mesh];
const auto end = start + count;
for (usize i = start; i != end; ++i)
{
meshPrimitives[i].m_TransformIdx = nodeArrayIndex;
}
}
for (const i32 child : node->children)
{
processNode(child, nodeArrayIndex);
}
};
auto *scene = &model.scenes[model.defaultScene];
for (i32 rootNodeIdx : scene->nodes)
{
processNode(rootNodeIdx, 0);
}
}
}
nodes.Update();
StorageBuffer nodeBuffer;
nodeBuffer.Init(pDevice, nodes.GetGlobalTransformByteSize(), true);
nodeBuffer.Write(pDevice, 0, nodes.GetGlobalTransformByteSize(), nodes.GetGlobalTransformPtr());
BufferHandle nodeHandle = m_ResourceManager->Commit(&nodeBuffer);
#pragma region Staging / Transfer / Uploads
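// Create the GPU-side position, vertex-attribute and index buffers and record a staged copy for
// each; the staging buffers are kept alive in stagingBuffers until the submission below completes.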
BufferHandle positionBufferHandle;
BufferHandle vertexDataHandle;
IndexBuffer indexBuffer;
{
auto uploadBufferData = [cmd = this->m_CommandBuffer, &stagingBuffers, pDevice](const Buffer *buffer,
const void *data) {
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = buffer->GetSize()};
StagingBuffer &stagingBuffer = stagingBuffers.push_back();
stagingBuffer.Init(pDevice, bufferCopy.size);
stagingBuffer.Write(pDevice, 0, bufferCopy.size, data);
cmd.copyBuffer(stagingBuffer.m_Buffer, buffer->m_Buffer, 1, &bufferCopy);
};
StorageBuffer positionBuffer;
positionBuffer.Init(pDevice, vertexPositions.size() * sizeof vertexPositions[0], false);
positionBufferHandle = m_ResourceManager->Commit(&positionBuffer);
uploadBufferData(&positionBuffer, vertexPositions.data());
StorageBuffer vertexDataBuffer;
vertexDataBuffer.Init(pDevice, vertexData.size() * sizeof vertexData[0], false);
vertexDataHandle = m_ResourceManager->Commit(&vertexDataBuffer);
uploadBufferData(&vertexDataBuffer, vertexData.data());
indexBuffer.Init(pDevice, indices.size() * sizeof indices[0]);
uploadBufferData(&indexBuffer, indices.data());
}
#pragma endregion
#if !defined(ASTER_NDEBUG)
m_CommandBuffer.endDebugUtilsLabelEXT();
#endif
AbortIfFailed(m_CommandBuffer.end());
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &m_CommandBuffer,
};
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u64>));
pDevice->m_Device.destroy(fence, nullptr);
AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {}));
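// The fence wait above guarantees all recorded transfers have completed, so staging memory can be freed.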
for (auto &buffer : stagingBuffers)
{
buffer.Destroy(pDevice);
}
Model::ModelHandles handles = {
.m_VertexPositionHandle = positionBufferHandle,
.m_VertexDataHandle = vertexDataHandle,
.m_MaterialsHandle = materialsHandle,
.m_NodeHandle = nodeHandle,
};
eastl::vector<TextureHandle> textureHandles;
textureHandles.reserve(textureHandleMap.size());
for (auto &[key, val] : textureHandleMap)
{
textureHandles.emplace_back(val);
}
return Model{
m_ResourceManager, std::move(textureHandles), std::move(nodes), handles, indexBuffer, meshPrimitives,
};
}
Model::Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles, Nodes &&nodes,
const ModelHandles &handles, const IndexBuffer &indexBuffer,
const eastl::vector<MeshPrimitive> &meshPrimitives)
: m_ResourceManager(resourceManager)
, m_TextureHandles(std::move(textureHandles))
, m_Nodes(std::move(nodes))
, m_Handles(handles)
, m_IndexBuffer(indexBuffer)
, m_MeshPrimitives(meshPrimitives)
{
}
Model::Model(Model &&other) noexcept
: m_ResourceManager(Take(other.m_ResourceManager))
, m_TextureHandles(std::move(other.m_TextureHandles))
, m_Nodes(std::move(other.m_Nodes))
, m_Handles(other.m_Handles)
, m_IndexBuffer(other.m_IndexBuffer)
, m_MeshPrimitives(std::move(other.m_MeshPrimitives))
{
}
Model &
Model::operator=(Model &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = Take(other.m_ResourceManager);
m_TextureHandles = std::move(other.m_TextureHandles);
m_Nodes = std::move(other.m_Nodes);
m_Handles = other.m_Handles;
m_IndexBuffer = other.m_IndexBuffer;
m_MeshPrimitives = std::move(other.m_MeshPrimitives);
return *this;
}
const mat4 &
Model::GetModelTransform() const
{
return m_Nodes[0];
}
void
Model::SetModelTransform(const mat4 &transform)
{
m_Nodes.Set(0, transform);
}
Model::~Model()
{
if (!m_ResourceManager)
return;
m_IndexBuffer.Destroy(m_ResourceManager->m_Device);
m_ResourceManager->Release(m_Handles.m_VertexDataHandle);
m_ResourceManager->Release(m_Handles.m_NodeHandle);
m_ResourceManager->Release(m_Handles.m_VertexPositionHandle);
m_ResourceManager->Release(m_Handles.m_MaterialsHandle);
for (const TextureHandle &handle : m_TextureHandles)
{
m_ResourceManager->Release(handle);
}
}
void
Model::Update()
{
if (m_Nodes.Update())
{
m_ResourceManager->Write(m_Handles.m_NodeHandle, 0, m_Nodes.GetGlobalTransformByteSize(),
m_Nodes.GetGlobalTransformPtr());
}
}
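// The loader owns a transient command pool and a single primary command buffer on the transfer
// queue; uploads are recorded into it and submitted synchronously.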
AssetLoader::AssetLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
u32 graphicsQueueIndex)
: m_ResourceManager(resourceManager)
, m_TransferQueue(transferQueue)
, m_TransferQueueIndex(transferQueueIndex)
, m_GraphicsQueueIndex(graphicsQueueIndex)
{
const Device *pDevice = resourceManager->m_Device;
const vk::CommandPoolCreateInfo poolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = transferQueueIndex,
};
AbortIfFailedM(pDevice->m_Device.createCommandPool(&poolCreateInfo, nullptr, &m_CommandPool),
"Transfer command pool creation failed.");
pDevice->SetName(m_CommandPool, "Asset Loader Command Pool");
const vk::CommandBufferAllocateInfo commandBufferAllocateInfo = {
.commandPool = m_CommandPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailed(pDevice->m_Device.allocateCommandBuffers(&commandBufferAllocateInfo, &m_CommandBuffer));
pDevice->SetName(m_CommandBuffer, "Asset Loader Command Buffer");
}
AssetLoader::~AssetLoader()
{
if (m_ResourceManager)
{
m_ResourceManager->m_Device->m_Device.destroy(m_CommandPool, nullptr);
}
}
AssetLoader::AssetLoader(AssetLoader &&other) noexcept
: m_ResourceManager(Take(other.m_ResourceManager))
, m_CommandPool(other.m_CommandPool)
, m_CommandBuffer(other.m_CommandBuffer)
, m_TransferQueue(other.m_TransferQueue)
, m_TransferQueueIndex(other.m_TransferQueueIndex)
, m_GraphicsQueueIndex(other.m_GraphicsQueueIndex)
{
}
AssetLoader &
AssetLoader::operator=(AssetLoader &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = Take(other.m_ResourceManager);
m_CommandPool = other.m_CommandPool;
m_CommandBuffer = other.m_CommandBuffer;
m_TransferQueue = other.m_TransferQueue;
m_TransferQueueIndex = other.m_TransferQueueIndex;
m_GraphicsQueueIndex = other.m_GraphicsQueueIndex;
return *this;
}