// =============================================
// Aster: asset_loader.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================

#include "aster/core/buffer.h"
#include "aster/core/device.h"
#include "aster/core/image.h"

#include "asset_loader.h"
#include "helpers.h"

#include "aster/systems/commit_manager.h"
#include "aster/systems/resource_manager.h"

#include <EASTL/fixed_vector.h>
#include <EASTL/hash_map.h>
#include <glm/gtc/type_ptr.hpp>
#include <stb_image.h>
#include <tiny_gltf.h>

#include <filesystem>

#if defined(LoadImage)
#undef LoadImage
#endif

constexpr vk::CommandBufferBeginInfo OneTimeCmdBeginInfo = {
    .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
};

vec4 VectorToVec4(const std::vector<double> &vec) {
    if (vec.empty()) {
        return vec4{0.0f};
    }
    assert(vec.size() == 4);
    return {vec[0], vec[1], vec[2], vec[3]};
}

vec3 VectorToVec3(const std::vector<double> &vec) {
    if (vec.empty()) {
        return vec3{0.0f};
    }
    assert(vec.size() == 3);
    return {vec[0], vec[1], vec[2]};
}

Ref<CombinedImageView> AssetLoader::LoadHdrImage(cstr path, cstr name) const {
    i32 x, y, nChannels;
    f32 *data = stbi_loadf(path, &x, &y, &nChannels, 4);
    assert(nChannels == 3);
    ERROR_IF(!data, "Could not load {}", path) THEN_ABORT(-1);

    u32 width = Cast<u32>(x);
    u32 height = Cast<u32>(y);

    auto texture = m_ResourceManager->CombinedImageViews().CreateTexture2D({
        .m_Format = vk::Format::eR32G32B32A32Sfloat,
        .m_Extent = {width, height},
        .m_Name = path,
        .m_IsSampled = true,
        .m_IsMipMapped = false,
        .m_IsStorage = false,
    });

    auto *pDevice = m_CommitManager->m_Device;

    auto stagingBuffer =
        m_ResourceManager->Buffers().CreateStagingBuffer((sizeof *data) * x * y * 4, "HDR Staging Buffer");
    stagingBuffer->Write(0, stagingBuffer->m_Size, data);

    stbi_image_free(data);

#pragma region Setup Copy/Sync primitives
    vk::BufferImageCopy2 copyRegion = {
        .bufferOffset = 0,
        .bufferRowLength = width,
        .bufferImageHeight = height,
        .imageSubresource = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .mipLevel = 0,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
        .imageOffset = {0, 0, 0},
        .imageExtent = texture->m_Extent,
    };

    vk::CopyBufferToImageInfo2 stagingInfo = {
        .srcBuffer = stagingBuffer->m_Buffer,
        .dstImage = texture->GetImage(),
        .dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
        .regionCount = 1,
        .pRegions = &copyRegion,
    };

    vk::ImageMemoryBarrier2 readyToStageBarrier = {
        .srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
        .srcAccessMask = vk::AccessFlagBits2::eNone,
        .dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .oldLayout = vk::ImageLayout::eUndefined,
        .newLayout = vk::ImageLayout::eTransferDstOptimal,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = texture->GetImage(),
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
    };

    vk::DependencyInfo readyToStageDependency = {
        .memoryBarrierCount = 0,
        .bufferMemoryBarrierCount = 0,
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &readyToStageBarrier,
    };
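    // The barrier below doubles as a queue-family ownership transfer: srcQueueFamilyIndex is the
    // transfer family and dstQueueFamilyIndex is the graphics family. Note (assumption, not verified
    // against the rest of the codebase): a release like this normally needs a matching acquire
    // barrier recorded on the graphics queue before the image is sampled; if both indices happen to
    // be the same family, it degenerates into a plain layout transition.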
    vk::ImageMemoryBarrier2 postStagingBarrier = {
        .srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
        .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eFragmentShader | vk::PipelineStageFlagBits2::eComputeShader,
        .dstAccessMask = vk::AccessFlagBits2::eShaderRead,
        .oldLayout = vk::ImageLayout::eTransferDstOptimal,
        .newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
        .srcQueueFamilyIndex = m_TransferQueueIndex,
        .dstQueueFamilyIndex = m_GraphicsQueueIndex,
        .image = texture->GetImage(),
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
    };

    vk::DependencyInfo postStagingDependency = {
        .memoryBarrierCount = 0,
        .bufferMemoryBarrierCount = 0,
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &postStagingBarrier,
    };
#pragma endregion

    AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo));

#if !defined(ASTER_NDEBUG)
    StackString<128> loadActionName = "Load: ";
    loadActionName += name ? name : path;
    vk::DebugUtilsLabelEXT debugLabel = {
        .pLabelName = loadActionName.c_str(),
        .color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
    };
    m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif

    m_CommandBuffer.pipelineBarrier2(&readyToStageDependency);
    m_CommandBuffer.copyBufferToImage2(&stagingInfo);
    m_CommandBuffer.pipelineBarrier2(&postStagingDependency);

#if !defined(ASTER_NDEBUG)
    m_CommandBuffer.endDebugUtilsLabelEXT();
#endif

    AbortIfFailed(m_CommandBuffer.end());

    vk::SubmitInfo submitInfo = {
        .waitSemaphoreCount = 0,
        .pWaitDstStageMask = nullptr,
        .commandBufferCount = 1,
        .pCommandBuffers = &m_CommandBuffer,
    };

    vk::Fence fence;
    vk::FenceCreateInfo fenceCreateInfo = {};
    AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
    AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
    AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u64>));
    pDevice->m_Device.destroy(fence, nullptr);

    AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {}));

    return texture;
}
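// Generates the full mip chain for `texture` by repeatedly blitting mip N into mip N + 1, halving
// each dimension (never below 1). A sketch of the expected mip count, assuming the image's
// GetMipLevels() follows the usual convention:
//
//     u32 mipLevels = Cast<u32>(std::floor(std::log2(std::max(width, height)))) + 1;
//     // e.g. a 1024x512 image -> 11 mip levels (1024, 512, ..., 1).
//
// The caller is expected to leave mip 0 in `initialLayout` with all `prevStage` writes finished;
// on return every level is in `finalLayout` and visible as a shader read at `finalStage`.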
void GenerateMipMaps(vk::CommandBuffer commandBuffer, const Ref<Image> &texture, vk::ImageLayout initialLayout,
                     vk::ImageLayout finalLayout, vk::PipelineStageFlags2 prevStage, vk::PipelineStageFlags2 finalStage) {
#if !defined(ASTER_NDEBUG)
    vk::DebugUtilsLabelEXT label = {
        .pLabelName = "Generate Mipmap",
        .color = std::array{0.9f, 0.9f, 0.9f, 1.0f},
    };
    commandBuffer.beginDebugUtilsLabelEXT(&label);
#endif

    vk::ImageMemoryBarrier2 imageStartBarrier = {
        .srcStageMask = prevStage,
        .srcAccessMask = vk::AccessFlagBits2::eNone,
        .dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferRead,
        .oldLayout = initialLayout,
        .newLayout = vk::ImageLayout::eTransferSrcOptimal,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = texture->m_Image,
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = texture->m_LayerCount,
        },
    };

    vk::ImageMemoryBarrier2 mipsStartBarrier = imageStartBarrier;
    mipsStartBarrier.dstAccessMask = vk::AccessFlagBits2::eTransferWrite;
    mipsStartBarrier.oldLayout = vk::ImageLayout::eUndefined;
    mipsStartBarrier.newLayout = vk::ImageLayout::eTransferDstOptimal;
    mipsStartBarrier.subresourceRange = {
        .aspectMask = vk::ImageAspectFlagBits::eColor,
        .baseMipLevel = 1,
        .levelCount = texture->GetMipLevels() - 1,
        .baseArrayLayer = 0,
        .layerCount = texture->m_LayerCount,
    };

    eastl::fixed_vector<vk::ImageMemoryBarrier2, 2> startBarriers = {
        mipsStartBarrier,
    };
    if (initialLayout != imageStartBarrier.newLayout) {
        startBarriers.push_back(imageStartBarrier);
    }

    vk::DependencyInfo imageStartDependency = {
        .imageMemoryBarrierCount = Cast<u32>(startBarriers.size()),
        .pImageMemoryBarriers = startBarriers.data(),
    };

    vk::ImageMemoryBarrier2 nextMipBarrier = {
        .srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferRead,
        .oldLayout = vk::ImageLayout::eTransferDstOptimal,
        .newLayout = vk::ImageLayout::eTransferSrcOptimal,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = texture->m_Image,
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = texture->m_LayerCount,
        },
    };

    vk::DependencyInfo interMipDependency = {
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &nextMipBarrier,
    };

    vk::ImageMemoryBarrier2 imageReadyBarrier = {
        .srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .dstStageMask = finalStage,
        .dstAccessMask = vk::AccessFlagBits2::eShaderRead,
        .oldLayout = vk::ImageLayout::eTransferSrcOptimal,
        .newLayout = finalLayout,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = texture->m_Image,
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseMipLevel = 0,
            .levelCount = texture->GetMipLevels(),
            .baseArrayLayer = 0,
            .layerCount = texture->m_LayerCount,
        },
    };

    vk::DependencyInfo imageReadyDependency = {
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &imageReadyBarrier,
    };

    vk::ImageBlit2 blitRegion = {
        .srcSubresource = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseArrayLayer = 0,
            .layerCount = texture->m_LayerCount,
        },
        .dstSubresource = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseArrayLayer = 0,
            .layerCount = texture->m_LayerCount,
        },
    };

    vk::BlitImageInfo2 mipBlitInfo = {
        .srcImage = texture->m_Image,
        .srcImageLayout = vk::ImageLayout::eTransferSrcOptimal,
        .dstImage = texture->m_Image,
        .dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
        .regionCount = 1,
        .pRegions = &blitRegion,
        .filter = vk::Filter::eLinear,
    };

    auto calcNextMip = [](i32 prev) {
        return eastl::max(prev / 2, 1);
    };

    // Mip Mapping
    commandBuffer.pipelineBarrier2(&imageStartDependency);

    i32 prevMipWidth = Cast<i32>(texture->m_Extent.width);
    i32 prevMipHeight = Cast<i32>(texture->m_Extent.height);
    u32 maxPrevMip = texture->GetMipLevels() - 1;
    for (u32 prevMipLevel = 0; prevMipLevel < maxPrevMip; ++prevMipLevel) {
        i32 currentMipWidth = calcNextMip(prevMipWidth);
        i32 currentMipHeight = calcNextMip(prevMipHeight);
        u32 currentMipLevel = prevMipLevel + 1;

        blitRegion.srcSubresource.mipLevel = prevMipLevel;
        blitRegion.srcOffsets = std::array{
            vk::Offset3D{0, 0, 0},
            vk::Offset3D{prevMipWidth, prevMipHeight, 1},
        };
        blitRegion.dstSubresource.mipLevel = currentMipLevel;
        blitRegion.dstOffsets = std::array{
            vk::Offset3D{0, 0, 0},
            vk::Offset3D{currentMipWidth, currentMipHeight, 1},
        };
        nextMipBarrier.subresourceRange.baseMipLevel = currentMipLevel;

        commandBuffer.blitImage2(&mipBlitInfo);
        commandBuffer.pipelineBarrier2(&interMipDependency);

        prevMipHeight = currentMipHeight;
        prevMipWidth = currentMipWidth;
    }

    commandBuffer.pipelineBarrier2(&imageReadyDependency);

#if !defined(ASTER_NDEBUG)
    commandBuffer.endDebugUtilsLabelEXT();
#endif
}
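// Records the upload of a single glTF image into m_CommandBuffer: copy from a staging buffer, then
// mip generation via GenerateMipMaps. Nothing is submitted here; the caller (LoadModelToGpu) submits
// the command buffer and waits on a fence, which is why the staging buffer is returned alongside the
// committed view handle -- it has to stay alive until that submission has completed.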
std::tuple<systems::ResId, Ref<Buffer>> AssetLoader::LoadImageToGpu(tinygltf::Image *image, bool isSrgb) const {
    // TODO(Something not loading properly).
    assert(image->component == 4);
    assert(image->height > 0 && image->width > 0);
    u32 height = Cast<u32>(image->height);
    u32 width = Cast<u32>(image->width);

    vk::Format imageFormat = isSrgb ? vk::Format::eR8G8B8A8Srgb : vk::Format::eR8G8B8A8Unorm;
    usize byteSize = image->image.size();

    auto texture = m_ResourceManager->Images().CreateTexture2D({
        .m_Format = imageFormat,
        .m_Extent = {width, height},
        .m_Name = image->name.c_str(),
        .m_IsSampled = true,
        .m_IsMipMapped = true,
        .m_IsStorage = false,
    });

    auto stagingBuffer = m_ResourceManager->Buffers().CreateStagingBuffer(byteSize);
    stagingBuffer->Write(0, byteSize, image->image.data());

#if !defined(ASTER_NDEBUG)
    StackString<128> loadActionName = "Load: ";
    loadActionName += image->name.empty() ? "" : image->name.c_str();
    vk::DebugUtilsLabelEXT debugLabel = {
        .pLabelName = loadActionName.c_str(),
        .color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
    };
    m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif

#pragma region Barriers and Blits
    vk::ImageMemoryBarrier2 imageStartBarrier = {
        .srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe,
        .srcAccessMask = vk::AccessFlagBits2::eNone,
        .dstStageMask = vk::PipelineStageFlagBits2::eTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .oldLayout = vk::ImageLayout::eUndefined,
        .newLayout = vk::ImageLayout::eTransferDstOptimal,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = texture->m_Image,
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
    };

    vk::DependencyInfo imageStartDependency = {
        .memoryBarrierCount = 0,
        .bufferMemoryBarrierCount = 0,
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &imageStartBarrier,
    };

    vk::ImageMemoryBarrier2 postStagingBarrier = {
        .srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
        .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferRead,
        .oldLayout = vk::ImageLayout::eTransferDstOptimal,
        .newLayout = vk::ImageLayout::eTransferSrcOptimal,
        .srcQueueFamilyIndex = m_TransferQueueIndex,
        .dstQueueFamilyIndex = m_GraphicsQueueIndex,
        .image = texture->m_Image,
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
    };

    vk::DependencyInfo postStagingDependency = {
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &postStagingBarrier,
    };

    vk::BufferImageCopy2 imageCopy = {
        .bufferOffset = 0,
        .bufferRowLength = Cast<u32>(image->width),
        .bufferImageHeight = Cast<u32>(image->height),
        .imageSubresource = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .mipLevel = 0,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
        .imageOffset = {},
        .imageExtent = texture->m_Extent,
    };

    vk::CopyBufferToImageInfo2 stagingCopyInfo = {
        .srcBuffer = stagingBuffer->m_Buffer,
        .dstImage = texture->m_Image,
        .dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
        .regionCount = 1,
        .pRegions = &imageCopy,
    };
#pragma endregion

    m_CommandBuffer.pipelineBarrier2(&imageStartDependency);
    m_CommandBuffer.copyBufferToImage2(&stagingCopyInfo);
    m_CommandBuffer.pipelineBarrier2(&postStagingDependency);

    GenerateMipMaps(m_CommandBuffer, texture, vk::ImageLayout::eTransferSrcOptimal, vk::ImageLayout::eShaderReadOnlyOptimal);

#if !defined(ASTER_NDEBUG)
    m_CommandBuffer.endDebugUtilsLabelEXT();
#endif

    auto textureView = m_ResourceManager->Views().CreateView(
        {.m_Image = texture, .m_Name = image->name.data(), .m_AspectMask = vk::ImageAspectFlagBits::eColor});

    return {m_CommitManager->CommitTexture(textureView), stagingBuffer};
}
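// Loads a .gltf/.glb file and uploads everything the renderer needs in one transfer submission:
// material textures, the material table, merged vertex/index buffers and per-node transforms.
// Every staging buffer is collected in a local list and kept alive until the single fence wait at
// the end of the function, so all recorded copies are guaranteed to have finished before they are
// released.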
Model AssetLoader::LoadModelToGpu(cstr path, cstr name) {
    namespace fs = std::filesystem;

    tinygltf::Model model;
    tinygltf::TinyGLTF loader;

    const Device *pDevice = m_CommitManager->m_Device;

    const auto fsPath = fs::absolute(path);
    const auto ext = fsPath.extension();
    if (ext == GLTF_ASCII_FILE_EXTENSION) {
        std::string err;
        std::string warn;
        if (loader.LoadASCIIFromFile(&model, &err, &warn, fsPath.generic_string())) {
            ERROR_IF(!err.empty(), "{}", err) ELSE_IF_WARN(!warn.empty(), "{}", warn);
        }
    }
    if (ext == GLTF_BINARY_FILE_EXTENSION) {
        std::string err;
        std::string warn;
        if (loader.LoadBinaryFromFile(&model, &err, &warn, fsPath.generic_string())) {
            ERROR_IF(!err.empty(), "{}", err) ELSE_IF_WARN(!warn.empty(), "{}", warn);
        }
    }

    AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo));

#if !defined(ASTER_NDEBUG)
    StackString<128> loadActionName = "Load: ";
    loadActionName += name ? name : path;
    vk::DebugUtilsLabelEXT debugLabel = {
        .pLabelName = loadActionName.c_str(),
        .color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
    };
    m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif

    eastl::vector<Ref<Buffer>> stagingBuffers;
    eastl::hash_map<i32, systems::ResId> textureHandleMap;
    eastl::vector<Material> materials;
    systems::ResId materialsHandle = systems::ResId::Null();
    if (!model.materials.empty()) {
        // TODO("Something broken on load here.");
        auto getTextureHandle = [this, &textureHandleMap, &stagingBuffers, &model](i32 index,
                                                                                   const bool isSrgb) -> systems::ResId {
            if (index < 0) {
                return systems::NullId{};
            }
            if (const auto iter = textureHandleMap.find(index); iter != textureHandleMap.end()) {
                return iter->second;
            }
            auto *image = &model.images[index];
            auto [handle, staging] = LoadImageToGpu(image, isSrgb);
            textureHandleMap.emplace(index, handle);
            stagingBuffers.emplace_back(std::move(staging));
            return handle;
        };

        materials.reserve(model.materials.size());
        for (auto &material : model.materials) {
            materials.push_back({
                .m_AlbedoFactor = VectorToVec4(material.pbrMetallicRoughness.baseColorFactor),
                .m_EmissionFactor = VectorToVec3(material.emissiveFactor),
                .m_MetalFactor = Cast<f32>(material.pbrMetallicRoughness.metallicFactor),
                .m_RoughFactor = Cast<f32>(material.pbrMetallicRoughness.roughnessFactor),
                .m_AlbedoTex = getTextureHandle(material.pbrMetallicRoughness.baseColorTexture.index, true),
                .m_NormalTex = getTextureHandle(material.normalTexture.index, false),
                .m_MetalRoughTex = getTextureHandle(material.pbrMetallicRoughness.metallicRoughnessTexture.index, false),
                .m_OcclusionTex = getTextureHandle(material.occlusionTexture.index, false),
                .m_EmissionTex = getTextureHandle(material.emissiveTexture.index, true),
            });
        }

        usize materialsByteSize = materials.size() * sizeof materials[0];
        auto materialsBuffer = m_ResourceManager->Buffers().CreateStorageBuffer(materialsByteSize, name);
        materialsHandle = m_CommitManager->CommitBuffer(materialsBuffer);

        auto materialStaging = m_ResourceManager->Buffers().CreateStagingBuffer(materialsByteSize);
        materialStaging->Write(0, materialsByteSize, materials.data());

        vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = materialsByteSize};
        m_CommandBuffer.copyBuffer(materialStaging->m_Buffer, materialsBuffer->m_Buffer, 1, &bufferCopy);
        stagingBuffers.emplace_back(std::move(materialStaging));
    }
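    // Geometry is merged into shared arrays: every primitive of every mesh appends its positions,
    // per-vertex attributes and indices, and records its m_VertexOffset / m_FirstIndex / m_IndexCount
    // so a single position buffer, vertex-data buffer and index buffer can serve the whole model.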
    // TODO: Mesh reordering based on nodes AND OR meshoptimizer
    // TODO: Support scenes
    eastl::vector<vec4> vertexPositions;
    eastl::vector<VertexData> vertexData;
    eastl::vector<u32> indices;

    eastl::vector<MeshPrimitive> meshPrimitives;
    meshPrimitives.reserve(model.meshes.size());

    // Offset, Count
    eastl::vector<eastl::pair<usize, usize>> meshPrimRanges;
    meshPrimRanges.reserve(model.meshes.size());

    u32 vertexOffset = 0;
    u32 indexOffset = 0;
    for (auto &mesh : model.meshes) {
        meshPrimRanges.emplace_back(meshPrimitives.size(), mesh.primitives.size());
        for (auto &prim : mesh.primitives) {
            u32 vertexCount = 0;
            u32 indexCount = 0;

#pragma region Position
            assert(prim.attributes.contains(APosition));
            assert(prim.mode == TINYGLTF_MODE_TRIANGLES);
            {
                tinygltf::Accessor *posAccessor = &model.accessors[prim.attributes[APosition]];
                assert(posAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *posBufferView = &model.bufferViews[posAccessor->bufferView];
                tinygltf::Buffer *posBuffer = &model.buffers[posBufferView->buffer];
                usize byteOffset = (posAccessor->byteOffset + posBufferView->byteOffset);

                vertexCount = Cast<u32>(posAccessor->count);
                vertexPositions.reserve(vertexOffset + vertexCount);
                if (posAccessor->type == TINYGLTF_TYPE_VEC4) {
                    auto data = Recast<vec4 *>(posBuffer->data.data() + byteOffset);
                    vertexPositions.insert(vertexPositions.end(), data, data + vertexCount);
                } else if (posAccessor->type == TINYGLTF_TYPE_VEC3) {
                    auto data = Recast<vec3 *>(posBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i) {
                        vertexPositions.push_back(vec4(data[i], 1.0f));
                    }
                } else if (posAccessor->type == TINYGLTF_TYPE_VEC2) {
                    auto data = Recast<vec2 *>(posBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i) {
                        vertexPositions.push_back(vec4(data[i], 0.0f, 1.0f));
                    }
                }
            }
#pragma endregion
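            // Positions are widened to vec4 and kept in their own stream; the remaining attributes
            // go into a separate per-vertex data stream. Note (an assumption about the assets, not
            // enforced here): the accessor reads only add accessor->byteOffset + bufferView->byteOffset,
            // so attribute data is expected to be tightly packed -- an interleaved bufferView with a
            // non-zero byteStride would be read incorrectly.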
#pragma region Vertex Data
            vertexData.resize(vertexPositions.size());

            // Normal Coords
            if (prim.attributes.contains(ANormal)) {
                tinygltf::Accessor *normAccessor = &model.accessors[prim.attributes[ANormal]];
                assert(normAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *normBufferView = &model.bufferViews[normAccessor->bufferView];
                tinygltf::Buffer *normBuffer = &model.buffers[normBufferView->buffer];
                usize byteOffset = (normAccessor->byteOffset + normBufferView->byteOffset);

                if (normAccessor->type == TINYGLTF_TYPE_VEC4) {
                    auto data = Recast<vec4 *>(normBuffer->data.data() + byteOffset);
                    vec4 *end = data + vertexCount;
                    u32 idx = vertexOffset;
                    vec4 *it = data;
                    while (it != end) {
                        vertexData[idx++].m_Normal = *(it++);
                    }
                } else if (normAccessor->type == TINYGLTF_TYPE_VEC3) {
                    auto data = Recast<vec3 *>(normBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i) {
                        auto norm = vec4(data[i], 0.0f);
                        vertexData[vertexOffset + i].m_Normal = norm;
                    }
                } else if (normAccessor->type == TINYGLTF_TYPE_VEC2) {
                    auto data = Recast<vec2 *>(normBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i) {
                        auto norm = vec4(data[i], 0.0f, 0.0f);
                        vertexData[vertexOffset + i].m_Normal = norm;
                    }
                }
            }

            // UV0
            if (prim.attributes.contains(ATexCoord0)) {
                tinygltf::Accessor *uvAccessor = &model.accessors[prim.attributes[ATexCoord0]];
                assert(uvAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *uvBufferView = &model.bufferViews[uvAccessor->bufferView];
                tinygltf::Buffer *uvBuffer = &model.buffers[uvBufferView->buffer];
                usize byteOffset = (uvAccessor->byteOffset + uvBufferView->byteOffset);

                assert(uvAccessor->type == TINYGLTF_TYPE_VEC2 &&
                       uvAccessor->componentType == TINYGLTF_COMPONENT_TYPE_FLOAT);
                {
                    auto data = Recast<vec2 *>(uvBuffer->data.data() + byteOffset);
                    vec2 *end = data + vertexCount;
                    u32 idx = vertexOffset;
                    vec2 *it = data;
                    while (it != end) {
                        vertexData[idx++].m_TexCoord0 = *(it++);
                    }
                }
            }

            if (prim.attributes.contains(AColor0)) {
                tinygltf::Accessor *colorAccessor = &model.accessors[prim.attributes[AColor0]];
                assert(colorAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *colorBufferView = &model.bufferViews[colorAccessor->bufferView];
                tinygltf::Buffer *colorBuffer = &model.buffers[colorBufferView->buffer];
                usize byteOffset = (colorAccessor->byteOffset + colorBufferView->byteOffset);

                if (colorAccessor->type == TINYGLTF_TYPE_VEC4) {
                    auto data = Recast<vec4 *>(colorBuffer->data.data() + byteOffset);
                    vec4 *end = data + vertexCount;
                    u32 idx = vertexOffset;
                    vec4 *it = data;
                    while (it != end) {
                        vertexData[idx++].m_Color0 = *(it++);
                    }
                } else if (colorAccessor->type == TINYGLTF_TYPE_VEC3) {
                    auto data = Recast<vec3 *>(colorBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i) {
                        auto color = vec4(data[i], 1.0f);
                        vertexData[vertexOffset + i].m_Color0 = color;
                    }
                }
            }
#pragma endregion

#pragma region Indices
            // Indices
            if (prim.indices >= 0) {
                tinygltf::Accessor *indexAccessor = &model.accessors[prim.indices];
                assert(indexAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *indexBufferView = &model.bufferViews[indexAccessor->bufferView];
                tinygltf::Buffer *indexBuffer = &model.buffers[indexBufferView->buffer];
                usize byteOffset = (indexAccessor->byteOffset + indexBufferView->byteOffset);

                indexCount = Cast<u32>(indexAccessor->count);
                indices.reserve(indexOffset + indexCount);
                if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT) {
                    auto data = Recast<u32 *>(indexBuffer->data.data() + byteOffset);
                    indices.insert(indices.end(), data, data + indexCount);
                } else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT) {
                    auto data = Recast<u16 *>(indexBuffer->data.data() + byteOffset);
                    indices.insert(indices.end(), data, data + indexCount);
                } else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE) {
                    auto data = Recast<u8 *>(indexBuffer->data.data() + byteOffset);
                    indices.insert(indices.end(), data, data + indexCount);
                }
            } else {
                indexCount = vertexCount;
                indices.reserve(indexOffset + vertexCount);
                for (u32 i = 0; i < indexCount; ++i) {
                    indices.push_back(i);
                }
            }
#pragma endregion

            meshPrimitives.push_back({
                .m_VertexOffset = vertexOffset,
                .m_FirstIndex = indexOffset,
                .m_IndexCount = indexCount,
                .m_MaterialIdx = prim.material,
                .m_TransformIdx = -1,
            });

            vertexOffset += vertexCount;
            indexOffset += indexCount;
        }
    }

    Nodes nodes;
    nodes.Add(mat4{1.0f}, -1);
    {
        if (model.defaultScene >= 0) {
            eastl::function<void(i32, i32)> processNode = [&processNode, &model, &nodes, &meshPrimRanges,
                                                           &meshPrimitives](i32 idx, i32 parent) -> void {
                const auto *node = &model.nodes[idx];

                auto nodeTranslation = vec3{0.0f};
                auto nodeRotation = quat{1.0f, 0.0f, 0.0f, 0.0f};
                auto nodeScale = vec3{1.0f};
                auto nodeMatrix = mat4{1.0f};
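                // glTF nodes carry either a matrix or a translation/rotation/scale triple. The local
                // transform is composed as T * R * S * matrix, so whichever representation is present
                // ends up applied; per the glTF spec the two forms are mutually exclusive, which is
                // why combining them multiplicatively is safe.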
                if (node->translation.size() == 3) {
                    nodeTranslation = glm::make_vec3(node->translation.data());
                }
                if (node->rotation.size() == 4) {
                    nodeRotation = glm::make_quat(node->rotation.data());
                }
                if (node->scale.size() == 3) {
                    // We don't handle the scale 0 special case yet.
                    nodeScale = glm::make_vec3(node->scale.data());
                }
                if (node->matrix.size() == 16) {
                    nodeMatrix = glm::make_mat4(node->matrix.data());
                }

                const mat4 transform = translate(mat4(1.0f), nodeTranslation) * mat4_cast(nodeRotation) *
                                       scale(mat4(1.0f), nodeScale) * nodeMatrix;

                const i32 nodeArrayIndex = Cast<i32>(nodes.Add(transform, parent));

                if (node->mesh >= 0) {
                    auto [start, count] = meshPrimRanges[node->mesh];
                    const auto end = start + count;
                    for (usize i = start; i != end; ++i) {
                        meshPrimitives[i].m_TransformIdx = nodeArrayIndex;
                    }
                }

                for (const i32 child : node->children) {
                    processNode(child, nodeArrayIndex);
                }
            };

            auto *scene = &model.scenes[model.defaultScene];
            for (i32 rootNodeIdx : scene->nodes) {
                processNode(rootNodeIdx, 0);
            }
        }
    }
    nodes.Update();

    auto nodeBuffer = m_ResourceManager->Buffers().CreateStorageBuffer(nodes.GetGlobalTransformByteSize());
    nodeBuffer->Write(0, nodes.GetGlobalTransformByteSize(), nodes.GetGlobalTransformPtr());
    systems::ResId nodeHandle = m_CommitManager->CommitBuffer(nodeBuffer);

#pragma region Staging / Transfer / Uploads
    systems::ResId positionBufferHandle = systems::ResId::Null();
    systems::ResId vertexDataHandle = systems::ResId::Null();
    Ref<Buffer> indexBuffer;
    {
        auto uploadBufferData = [cmd = this->m_CommandBuffer, &stagingBuffers, resMan = this->m_ResourceManager,
                                 pDevice](const Ref<Buffer> &buffer, const void *data) {
            const vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = buffer->m_Size};
            auto stagingBuffer = resMan->Buffers().CreateStagingBuffer(bufferCopy.size);
            stagingBuffer->Write(0, bufferCopy.size, data);
            cmd.copyBuffer(stagingBuffer->m_Buffer, buffer->m_Buffer, 1, &bufferCopy);
            stagingBuffers.emplace_back(std::move(stagingBuffer));
        };
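        // uploadBufferData only records the copy; the staging buffer is parked in stagingBuffers so
        // it stays alive until the submission below has been waited on, at which point everything is
        // released together.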
        auto positionBuffer =
            m_ResourceManager->Buffers().CreateStorageBuffer(vertexPositions.size() * sizeof vertexPositions[0]);
        positionBufferHandle = m_CommitManager->CommitBuffer(positionBuffer);
        uploadBufferData(positionBuffer, vertexPositions.data());

        auto vertexDataBuffer =
            m_ResourceManager->Buffers().CreateStorageBuffer(vertexData.size() * sizeof vertexData[0]);
        vertexDataHandle = m_CommitManager->CommitBuffer(vertexDataBuffer);
        uploadBufferData(vertexDataBuffer, vertexData.data());

        // TODO: Index buffer needs to be separated.
        indexBuffer = m_ResourceManager->Buffers().CreateStorageBuffer(indices.size() * sizeof indices[0], "Index Buffer");
        uploadBufferData(indexBuffer, indices.data());
    }
#pragma endregion

#if !defined(ASTER_NDEBUG)
    m_CommandBuffer.endDebugUtilsLabelEXT();
#endif

    AbortIfFailed(m_CommandBuffer.end());

    vk::SubmitInfo submitInfo = {
        .waitSemaphoreCount = 0,
        .pWaitDstStageMask = nullptr,
        .commandBufferCount = 1,
        .pCommandBuffers = &m_CommandBuffer,
    };

    vk::Fence fence;
    vk::FenceCreateInfo fenceCreateInfo = {};
    AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
    AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
    AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u64>));
    pDevice->m_Device.destroy(fence, nullptr);

    AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {}));

    Model::ModelHandles handles = {
        .m_VertexPositionHandle = positionBufferHandle,
        .m_VertexDataHandle = vertexDataHandle,
        .m_MaterialsHandle = materialsHandle,
        .m_NodeHandle = nodeHandle,
    };

    eastl::vector<systems::ResId> textureHandles;
    textureHandles.reserve(textureHandleMap.size());
    for (auto &[key, val] : textureHandleMap) {
        textureHandles.emplace_back(val);
    }

    return Model{
        m_CommitManager, textureHandles, std::move(nodes), nodeBuffer, handles, indexBuffer, meshPrimitives,
    };
}

Model::Model(systems::CommitManager *resourceManager, eastl::vector<systems::ResId> &textureHandles, Nodes &&nodes,
             Ref<Buffer> nodeBuffer, ModelHandles &handles, Ref<Buffer> indexBuffer,
             const eastl::vector<MeshPrimitive> &meshPrimitives)
    : m_ResourceManager(resourceManager)
    , m_TextureHandles(std::move(textureHandles))
    , m_Nodes(std::move(nodes))
    , m_Handles(std::move(handles))
    , m_NodeBuffer(std::move(nodeBuffer))
    , m_IndexBuffer(std::move(indexBuffer))
    , m_MeshPrimitives(meshPrimitives) {
}

const mat4 &Model::GetModelTransform() const {
    return m_Nodes[0];
}

void Model::SetModelTransform(const mat4 &transform) {
    m_Nodes.Set(0, transform);
}

void Model::Update() {
    if (m_Nodes.Update()) {
        m_NodeBuffer->Write(0, m_Nodes.GetGlobalTransformByteSize(), m_Nodes.GetGlobalTransformPtr());
    }
}
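// A minimal usage sketch (the asset paths and surrounding setup are assumptions; only the
// AssetLoader calls themselves come from this file):
//
//     AssetLoader loader{pResourceManager, pCommitManager, transferQueue,
//                        transferQueueIndex, graphicsQueueIndex};
//     Model model = loader.LoadModelToGpu("assets/model.glb", "Model");
//     auto skybox = loader.LoadHdrImage("assets/env.hdr", "Environment");
//
// Each load records into the loader's single transient command buffer, submits on the transfer
// queue and blocks on a fence, so loads are synchronous and the command pool is reset between them.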
AssetLoader::AssetLoader(systems::ResourceManager *resourceManager, systems::CommitManager *commitManager,
                         vk::Queue transferQueue, u32 transferQueueIndex, u32 graphicsQueueIndex)
    : m_ResourceManager(resourceManager)
    , m_CommitManager(commitManager)
    , m_TransferQueue(transferQueue)
    , m_TransferQueueIndex(transferQueueIndex)
    , m_GraphicsQueueIndex(graphicsQueueIndex) {
    const Device *pDevice = commitManager->m_Device;

    const vk::CommandPoolCreateInfo poolCreateInfo = {
        .flags = vk::CommandPoolCreateFlagBits::eTransient,
        .queueFamilyIndex = transferQueueIndex,
    };
    AbortIfFailedM(pDevice->m_Device.createCommandPool(&poolCreateInfo, nullptr, &m_CommandPool),
                   "Transfer command pool creation failed.");
    pDevice->SetName(m_CommandPool, "Asset Loader Command Pool");

    const vk::CommandBufferAllocateInfo commandBufferAllocateInfo = {
        .commandPool = m_CommandPool,
        .level = vk::CommandBufferLevel::ePrimary,
        .commandBufferCount = 1,
    };
    AbortIfFailed(pDevice->m_Device.allocateCommandBuffers(&commandBufferAllocateInfo, &m_CommandBuffer));
    pDevice->SetName(m_CommandBuffer, "Asset Loader Command Buffer");
}

AssetLoader::~AssetLoader() {
    if (m_CommitManager && m_CommandPool) {
        m_CommitManager->m_Device->m_Device.destroy(m_CommandPool, nullptr);
    }
}

AssetLoader::AssetLoader(AssetLoader &&other) noexcept
    : m_ResourceManager(Take(other.m_ResourceManager))
    , m_CommitManager(Take(other.m_CommitManager))
    , m_CommandPool(Take(other.m_CommandPool))
    , m_CommandBuffer(other.m_CommandBuffer)
    , m_TransferQueue(other.m_TransferQueue)
    , m_TransferQueueIndex(other.m_TransferQueueIndex)
    , m_GraphicsQueueIndex(other.m_GraphicsQueueIndex) {
}

AssetLoader &AssetLoader::operator=(AssetLoader &&other) noexcept {
    if (this == &other)
        return *this;
    m_ResourceManager = Take(other.m_ResourceManager);
    m_CommitManager = Take(other.m_CommitManager);
    m_CommandPool = Take(other.m_CommandPool);
    m_CommandBuffer = other.m_CommandBuffer;
    m_TransferQueue = other.m_TransferQueue;
    m_TransferQueueIndex = other.m_TransferQueueIndex;
    m_GraphicsQueueIndex = other.m_GraphicsQueueIndex;
    return *this;
}