// =============================================
// Aster: model_loader.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#define TINYGLTF_NOEXCEPTION
#define JSON_NOEXCEPTION
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "model_loader.h"
#include "buffer.h"
#include "device.h"
#include "helpers.h"
#include "image.h"
#include "gpu_resource_manager.h"
#include <glm/gtc/type_ptr.hpp>
#include <EASTL/array.h>
vec4
VectorToVec4(const std::vector<double> &vec)
{
if (vec.empty())
{
return vec4{0.0f};
}
assert(vec.size() == 4);
return {vec[0], vec[1], vec[2], vec[3]};
}
vec3
VectorToVec3(const std::vector<double> &vec)
{
if (vec.empty())
{
return vec3{0.0f};
}
assert(vec.size() == 3);
return {vec[0], vec[1], vec[2]};
}
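// Uploads one glTF image into a device-local texture: copies the pixels through the supplied
// staging buffer, generates the full mip chain with linear blits, and leaves the image ready
// for sampling on the graphics queue.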
TextureHandle
ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, StagingBuffer *stagingBuffer,
tinygltf::Image *image) const
{
assert(image->component == 4);
Texture texture;
usize byteSize = image->image.size();
texture.Init(m_ResourceManager->m_Device, {.width = Cast<u32>(image->width), .height = Cast<u32>(image->height)},
vk::Format::eR8G8B8A8Srgb, true, image->name.data());
stagingBuffer->Init(m_ResourceManager->m_Device, byteSize);
stagingBuffer->Write(m_ResourceManager->m_Device, 0, byteSize, image->image.data());
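// Move every mip level into TransferDst so mip 0 can receive the staging copy and the
// remaining levels can receive the downsampling blits.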
vk::ImageMemoryBarrier imageStartBarrier = {
.srcAccessMask = vk::AccessFlagBits::eNone,
.dstAccessMask = vk::AccessFlagBits::eTransferWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.image = texture.m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = texture.GetMipLevels(),
.baseArrayLayer = 0,
.layerCount = 1,
},
};
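// Reused once per mip level: after a level has been written, flip it to TransferSrc so it can
// serve as the source of the next blit.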
vk::ImageMemoryBarrier nextMipBarrier = {
.srcAccessMask = vk::AccessFlagBits::eTransferWrite,
.dstAccessMask = vk::AccessFlagBits::eTransferRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eTransferSrcOptimal,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.image = texture.m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
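// Final barrier: transitions the whole mip chain to ShaderReadOnlyOptimal and releases
// ownership from the transfer queue family to the graphics queue family.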
vk::ImageMemoryBarrier imageReadyBarrier = {
.srcAccessMask = vk::AccessFlagBits::eTransferRead,
.dstAccessMask = vk::AccessFlagBits::eShaderRead,
.oldLayout = vk::ImageLayout::eTransferSrcOptimal,
.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
.srcQueueFamilyIndex = m_TransferQueueIndex,
.dstQueueFamilyIndex = m_GraphicsQueueIndex,
.image = texture.m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = texture.GetMipLevels(),
.baseArrayLayer = 0,
.layerCount = 1,
},
};
vk::BufferImageCopy imageCopy = {
.bufferOffset = 0,
.bufferRowLength = Cast<u32>(image->width),
.bufferImageHeight = Cast<u32>(image->height),
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = texture.m_Extent,
};
commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTopOfPipe, vk::PipelineStageFlagBits::eTransfer, {}, 0,
nullptr, 0, nullptr, 1, &imageStartBarrier);
commandBuffer.copyBufferToImage(stagingBuffer->m_Buffer, texture.m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
&imageCopy);
commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eTransfer, {}, 0,
nullptr, 0, nullptr, 1, &nextMipBarrier);
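// Generate the mip chain: each level is blitted from the previous one at half the extent,
// clamped to at least 1 texel per axis.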
auto calcNextMip = [](i32 prev) {
return eastl::max(prev / 2, 1);
};
i32 prevMipWidth = Cast<i32>(texture.m_Extent.width);
i32 prevMipHeight = Cast<i32>(texture.m_Extent.height);
u32 maxPrevMip = texture.GetMipLevels() - 1;
for (u32 prevMipLevel = 0; prevMipLevel < maxPrevMip; ++prevMipLevel)
{
i32 currentMipWidth = calcNextMip(prevMipWidth);
i32 currentMipHeight = calcNextMip(prevMipHeight);
u32 currentMipLevel = prevMipLevel + 1;
vk::ImageBlit blitRegion = {
.srcSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = prevMipLevel,
.baseArrayLayer = 0,
.layerCount = 1,
},
.srcOffsets = std::array{
vk::Offset3D{0, 0, 0},
vk::Offset3D{prevMipWidth, prevMipHeight, 1},
},
.dstSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = currentMipLevel,
.baseArrayLayer = 0,
.layerCount = 1,
},
.dstOffsets = std::array{
vk::Offset3D{0, 0, 0},
vk::Offset3D{currentMipWidth, currentMipHeight, 1},
},
};
nextMipBarrier.subresourceRange.baseMipLevel = currentMipLevel;
commandBuffer.blitImage(texture.m_Image, vk::ImageLayout::eTransferSrcOptimal, texture.m_Image,
vk::ImageLayout::eTransferDstOptimal, 1, &blitRegion, vk::Filter::eLinear);
commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eTransfer, {}, 0,
nullptr, 0, nullptr, 1, &nextMipBarrier);
prevMipHeight = currentMipHeight;
prevMipWidth = currentMipWidth;
}
commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eFragmentShader, {},
0, nullptr, 0, nullptr, 1, &imageReadyBarrier);
return m_ResourceManager->Commit(&texture);
}
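// Loads a .gltf/.glb file with tinygltf and uploads its images, materials, vertex streams and
// indices through a single one-time transfer command buffer. The call blocks on a fence before
// destroying the staging buffers, so the returned Model is fully resident on the GPU.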
Model
ModelLoader::LoadModel(cstr path, cstr name, bool batched)
{
namespace fs = std::filesystem;
tinygltf::Model model;
tinygltf::TinyGLTF loader;
const Device *pDevice = m_ResourceManager->m_Device;
const auto fsPath = fs::absolute(path);
const auto ext = fsPath.extension();
if (ext == GLTF_ASCII_FILE_EXTENSION)
{
std::string err;
std::string warn;
// Check the error and warning strings even when tinygltf reports a failed load.
const bool loaded = loader.LoadASCIIFromFile(&model, &err, &warn, fsPath.generic_string());
ERROR_IF(!loaded || !err.empty(), "{}", err)
ELSE_IF_WARN(!warn.empty(), "{}", warn);
}
else if (ext == GLTF_BINARY_FILE_EXTENSION)
{
std::string err;
std::string warn;
const bool loaded = loader.LoadBinaryFromFile(&model, &err, &warn, fsPath.generic_string());
ERROR_IF(!loaded || !err.empty(), "{}", err)
ELSE_IF_WARN(!warn.empty(), "{}", warn);
}
{
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
AbortIfFailed(m_CommandBuffer.begin(&beginInfo));
}
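// One staging buffer per image; they must stay alive until the transfer submit has completed.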
eastl::vector<StagingBuffer> stagingBuffers;
eastl::vector<TextureHandle> textureHandles;
if (!model.images.empty())
{
u32 numImages = Cast<u32>(model.images.size());
stagingBuffers.resize(numImages);
textureHandles.resize(numImages);
auto stagingPtr = stagingBuffers.data();
auto imagePtr = model.images.data();
for (TextureHandle &handle : textureHandles)
{
handle = LoadImage(m_CommandBuffer, stagingPtr++, imagePtr++);
}
}
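// Flatten the glTF PBR materials into the engine's Material layout and upload them as a
// storage buffer. A missing texture index maps to a null TextureHandle.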
eastl::vector<Material> materials;
StorageBuffer materialsBuffer;
BufferHandle materialsHandle;
if (!model.materials.empty())
{
auto getTextureHandle = [&textureHandles](i32 index) -> TextureHandle {
if (index >= 0)
{
return textureHandles[index];
}
return {};
};
materials.reserve(model.materials.size());
for (auto &material : model.materials)
{
materials.push_back({
.m_AlbedoFactor = VectorToVec4(material.pbrMetallicRoughness.baseColorFactor),
.m_EmissionFactor = VectorToVec3(material.emissiveFactor),
.m_MetalFactor = Cast<f32>(material.pbrMetallicRoughness.metallicFactor),
.m_RoughFactor = Cast<f32>(material.pbrMetallicRoughness.roughnessFactor),
.m_AlbedoTex = getTextureHandle(material.pbrMetallicRoughness.baseColorTexture.index),
.m_NormalTex = getTextureHandle(material.normalTexture.index),
.m_MetalRoughTex = getTextureHandle(material.pbrMetallicRoughness.metallicRoughnessTexture.index),
.m_OcclusionTex = getTextureHandle(material.occlusionTexture.index),
.m_EmissionTex = getTextureHandle(material.emissiveTexture.index),
});
}
usize materialsByteSize = materials.size() * sizeof materials[0];
materialsBuffer.Init(pDevice, materialsByteSize, false, name);
materialsHandle = m_ResourceManager->Commit(&materialsBuffer);
StagingBuffer &materialStaging = stagingBuffers.push_back();
materialStaging.Init(pDevice, materialsByteSize);
materialStaging.Write(pDevice, 0, materialsByteSize, materials.data());
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = materialsByteSize};
m_CommandBuffer.copyBuffer(materialStaging.m_Buffer, materialsBuffer.m_Buffer, 1, &bufferCopy);
}
// TODO: Mesh reordering based on nodes AND OR meshoptimizer
// TODO: Support scenes
eastl::vector<vec4> vertexPositions;
eastl::vector<VertexData> vertexData;
eastl::vector<u32> indices;
eastl::vector<MeshPrimitive> meshPrimitives;
meshPrimitives.reserve(model.meshes.size());
// Offset, Count
eastl::vector<eastl::pair<usize, usize>> meshPrimRanges;
meshPrimRanges.reserve(model.meshes.size());
u32 vertexOffset = 0;
u32 indexOffset = 0;
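// Pack every primitive of every mesh into shared position/vertex-data/index buffers.
// Positions are widened to vec4; primitives without indices get a trivial 0..N-1 index list.
// Each MeshPrimitive records its vertex offset and first index into the shared buffers.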
for (auto &mesh : model.meshes)
{
meshPrimRanges.emplace_back(meshPrimitives.size(), mesh.primitives.size());
for (auto &prim : mesh.primitives)
{
u32 vertexCount = 0;
u32 indexCount = 0;
#pragma region Position
assert(prim.attributes.contains(APosition));
assert(prim.mode == TINYGLTF_MODE_TRIANGLES);
{
tinygltf::Accessor *posAccessor = &model.accessors[prim.attributes[APosition]];
assert(posAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *posBufferView = &model.bufferViews[posAccessor->bufferView];
tinygltf::Buffer *posBuffer = &model.buffers[posBufferView->buffer];
usize byteOffset = (posAccessor->byteOffset + posBufferView->byteOffset);
vertexCount = Cast<u32>(posAccessor->count);
vertexPositions.reserve(vertexOffset + vertexCount);
if (posAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(posBuffer->data.data() + byteOffset);
vertexPositions.insert(vertexPositions.end(), data, data + vertexCount);
}
else if (posAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(posBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
vertexPositions.push_back(vec4(data[i], 1.0f));
}
}
else if (posAccessor->type == TINYGLTF_TYPE_VEC2)
{
vec2 *data = Recast<vec2 *>(posBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
vertexPositions.push_back(vec4(data[i], 0.0f, 1.0f));
}
}
}
#pragma endregion
vertexData.resize(vertexPositions.size());
#pragma region Normal
// Normal Coords
if (prim.attributes.contains(ANormal))
{
tinygltf::Accessor *normAccessor = &model.accessors[prim.attributes[ANormal]];
assert(normAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *normBufferView = &model.bufferViews[normAccessor->bufferView];
tinygltf::Buffer *normBuffer = &model.buffers[normBufferView->buffer];
usize byteOffset = (normAccessor->byteOffset + normBufferView->byteOffset);
if (normAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(normBuffer->data.data() + byteOffset);
vec4 *end = data + vertexCount;
u32 idx = vertexOffset;
vec4 *it = data;
while (it != end)
{
vertexData[idx++].m_Normal = *(it++);
}
}
else if (normAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(normBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto norm = vec4(data[i], 0.0f);
vertexData[vertexOffset + i].m_Normal = norm;
}
}
else if (normAccessor->type == TINYGLTF_TYPE_VEC2)
{
vec2 *data = Recast<vec2 *>(normBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto norm = vec4(data[i], 0.0f, 0.0f);
vertexData[vertexOffset + i].m_Normal = norm;
}
}
}
#pragma endregion
#pragma region UV0
// UV0
if (prim.attributes.contains(ATexCoord0))
{
tinygltf::Accessor *uvAccessor = &model.accessors[prim.attributes[ATexCoord0]];
assert(uvAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *uvBufferView = &model.bufferViews[uvAccessor->bufferView];
tinygltf::Buffer *uvBuffer = &model.buffers[uvBufferView->buffer];
usize byteOffset = (uvAccessor->byteOffset + uvBufferView->byteOffset);
assert(uvAccessor->type == TINYGLTF_TYPE_VEC2 &&
uvAccessor->componentType == TINYGLTF_COMPONENT_TYPE_FLOAT);
{
vec2 *data = Recast<vec2 *>(uvBuffer->data.data() + byteOffset);
vec2 *end = data + vertexCount;
u32 idx = vertexOffset;
vec2 *it = data;
while (it != end)
{
vertexData[idx++].m_TexCoord0 = *(it++);
}
}
}
#pragma endregion
#pragma region Color
if (prim.attributes.contains(AColor0))
{
tinygltf::Accessor *colorAccessor = &model.accessors[prim.attributes[AColor0]];
assert(colorAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *colorBufferView = &model.bufferViews[colorAccessor->bufferView];
tinygltf::Buffer *colorBuffer = &model.buffers[colorBufferView->buffer];
usize byteOffset = (colorAccessor->byteOffset + colorBufferView->byteOffset);
if (colorAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(colorBuffer->data.data() + byteOffset);
vec4 *end = data + vertexCount;
u32 idx = vertexOffset;
vec4 *it = data;
while (it != end)
{
vertexData[idx++].m_Color0 = *(it++);
}
}
else if (colorAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(colorBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto color = vec4(data[i], 1.0f);
vertexData[vertexOffset + i].m_Color0 = color;
}
}
}
#pragma endregion
#pragma region Indices
// Indices
if (prim.indices >= 0)
{
tinygltf::Accessor *indexAccessor = &model.accessors[prim.indices];
assert(indexAccessor->count <= MaxValue<u32>);
tinygltf::BufferView *indexBufferView = &model.bufferViews[indexAccessor->bufferView];
tinygltf::Buffer *indexBuffer = &model.buffers[indexBufferView->buffer];
usize byteOffset = (indexAccessor->byteOffset + indexBufferView->byteOffset);
indexCount = Cast<u32>(indexAccessor->count);
indices.reserve(indexOffset + indexCount);
if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT)
{
u32 *data = Recast<u32 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT)
{
u16 *data = Recast<u16 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE)
{
u8 *data = Recast<u8 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
}
else
{
indexCount = vertexCount;
indices.reserve(indexOffset + vertexCount);
for (u32 i = 0; i < indexCount; ++i)
{
indices.push_back(i);
}
}
#pragma endregion
meshPrimitives.push_back({
.m_VertexOffset = vertexOffset,
.m_FirstIndex = indexOffset,
.m_IndexCount = indexCount,
.m_MaterialIdx = prim.material,
.m_TransformIdx = -1,
});
vertexOffset += vertexCount;
indexOffset += indexCount;
}
}
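// Node 0 is an extra identity root that the application can use as the model transform;
// all glTF scene roots are parented to it.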
Nodes nodes;
nodes.Add(mat4{1.0f}, -1);
{
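// Walk the default scene, composing each node's TRS (or matrix) into the node hierarchy and
// pointing the node's mesh primitives at the resulting transform index.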
if (model.defaultScene >= 0)
{
eastl::function<void(i32, i32)> processNode =
    [&processNode, &model, &nodes, &meshPrimRanges, &meshPrimitives](i32 idx, i32 parent) -> void {
const auto *node = &model.nodes[idx];
vec3 nodeTranslation = vec3{0.0f};
quat nodeRotation = quat{1.0f, 0.0f, 0.0f, 0.0f};
vec3 nodeScale = vec3{1.0f};
mat4 nodeMatrix = mat4{1.0f};
if (node->translation.size() == 3)
{
nodeTranslation = glm::make_vec3(node->translation.data());
}
if (node->rotation.size() == 4)
{
nodeRotation = glm::make_quat(node->rotation.data());
}
if (node->scale.size() == 3)
{
// We don't handle the scale 0 special case yet.
nodeScale = glm::make_vec3(node->scale.data());
}
if (node->matrix.size() == 16)
{
nodeMatrix = glm::make_mat4(node->matrix.data());
}
const mat4 transform =
translate(mat4(1.0f), nodeTranslation) * mat4_cast(nodeRotation) * scale(mat4(1.0f), nodeScale) * nodeMatrix;
const i32 nodeArrayIndex = Cast<i32>(nodes.Add(transform, parent));
if (node->mesh >= 0)
{
auto [start, count] = meshPrimRanges[node->mesh];
const auto end = start + count;
for (usize i = start; i != end; ++i)
{
meshPrimitives[i].m_TransformIdx = nodeArrayIndex;
}
}
for (const i32 child : node->children)
{
processNode(child, nodeArrayIndex);
}
};
auto *scene = &model.scenes[model.defaultScene];
for (i32 rootNodeIdx : scene->nodes)
{
processNode(rootNodeIdx, 0);
}
}
}
nodes.Update();
StorageBuffer nodeBuffer;
nodeBuffer.Init(pDevice, nodes.GetGlobalTransformByteSize(), true);
nodeBuffer.Write(pDevice, 0, nodes.GetGlobalTransformByteSize(), nodes.GetGlobalTransformPtr());
BufferHandle nodeHandle = m_ResourceManager->Commit(&nodeBuffer);
#pragma region Staging / Transfer / Uploads
BufferHandle positionBufferHandle;
BufferHandle vertexDataHandle;
IndexBuffer indexBuffer;
{
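// Stages the given data into a fresh staging buffer and records a full-size copy into the
// target device-local buffer on the transfer command buffer.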
auto uploadBufferData = [cmd = this->m_CommandBuffer, &stagingBuffers, pDevice](const Buffer *buffer,
const void *data) {
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = buffer->GetSize()};
StagingBuffer &stagingBuffer = stagingBuffers.push_back();
stagingBuffer.Init(pDevice, bufferCopy.size);
stagingBuffer.Write(pDevice, 0, bufferCopy.size, data);
cmd.copyBuffer(stagingBuffer.m_Buffer, buffer->m_Buffer, 1, &bufferCopy);
};
StorageBuffer positionBuffer;
positionBuffer.Init(pDevice, vertexPositions.size() * sizeof vertexPositions[0], false);
positionBufferHandle = m_ResourceManager->Commit(&positionBuffer);
uploadBufferData(&positionBuffer, vertexPositions.data());
StorageBuffer vertexDataBuffer;
vertexDataBuffer.Init(pDevice, vertexData.size() * sizeof vertexData[0], false);
vertexDataHandle = m_ResourceManager->Commit(&vertexDataBuffer);
uploadBufferData(&vertexDataBuffer, vertexData.data());
indexBuffer.Init(pDevice, indices.size() * sizeof indices[0]);
uploadBufferData(&indexBuffer, indices.data());
}
#pragma endregion
AbortIfFailed(m_CommandBuffer.end());
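// Submit all recorded uploads on the transfer queue and block on a fence so the staging
// buffers can be destroyed immediately afterwards.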
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &m_CommandBuffer,
};
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
// The timeout is in nanoseconds; wait effectively forever for the upload to finish.
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u64>));
pDevice->m_Device.destroy(fence, nullptr);
AbortIfFailed(pDevice->m_Device.resetCommandPool(
m_CommandPool, batched ? vk::CommandPoolResetFlags{} : vk::CommandPoolResetFlagBits::eReleaseResources));
for (auto &buffer : stagingBuffers)
{
buffer.Destroy(pDevice);
}
Model::ModelHandles handles = {
.m_VertexPositionHandle = positionBufferHandle,
.m_VertexDataHandle = vertexDataHandle,
.m_MaterialsHandle = materialsHandle,
.m_NodeHandle = nodeHandle,
};
return Model{m_ResourceManager, std::move(textureHandles), std::move(nodes),
             handles, indexBuffer, meshPrimitives};
}
Model::Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles, Nodes &&nodes,
const ModelHandles &handles, const IndexBuffer &indexBuffer, const eastl::vector<MeshPrimitive> &meshPrimitives)
: m_ResourceManager(resourceManager)
, m_TextureHandles(std::move(textureHandles))
, m_Nodes(std::move(nodes))
, m_Handles(handles)
, m_IndexBuffer(indexBuffer)
, m_MeshPrimitives(meshPrimitives)
{
}
Model::Model(Model &&other) noexcept
: m_ResourceManager(Take(other.m_ResourceManager))
, m_TextureHandles(std::move(other.m_TextureHandles))
, m_Nodes(std::move(other.m_Nodes))
, m_Handles(other.m_Handles)
, m_IndexBuffer(other.m_IndexBuffer)
, m_MeshPrimitives(std::move(other.m_MeshPrimitives))
{
}
Model &
Model::operator=(Model &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = Take(other.m_ResourceManager);
m_TextureHandles = std::move(other.m_TextureHandles);
m_Nodes = std::move(other.m_Nodes);
m_Handles = other.m_Handles;
m_IndexBuffer = other.m_IndexBuffer;
m_MeshPrimitives = std::move(other.m_MeshPrimitives);
return *this;
}
const mat4 &
Model::GetModelTransform() const
{
return m_Nodes[0];
}
void
Model::SetModelTransform(const mat4 &transform)
{
m_Nodes.Set(0, transform);
}
Model::~Model()
{
if (!m_ResourceManager)
return;
m_IndexBuffer.Destroy(m_ResourceManager->m_Device);
m_ResourceManager->Release(m_Handles.m_VertexDataHandle);
m_ResourceManager->Release(m_Handles.m_NodeHandle);
m_ResourceManager->Release(m_Handles.m_VertexPositionHandle);
m_ResourceManager->Release(m_Handles.m_MaterialsHandle);
for (const TextureHandle &handle : m_TextureHandles)
{
m_ResourceManager->Release(handle);
}
}
void
Model::Update()
{
if (m_Nodes.Update())
{
m_ResourceManager->Write(m_Handles.m_NodeHandle, 0, m_Nodes.GetGlobalTransformByteSize(),
m_Nodes.GetGlobalTransformPtr());
}
}
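// Creates a transient command pool on the transfer queue family and allocates the single
// primary command buffer used for all upload work.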
ModelLoader::ModelLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
u32 graphicsQueueIndex)
: m_ResourceManager(resourceManager)
, m_TransferQueue(transferQueue)
, m_TransferQueueIndex(transferQueueIndex)
, m_GraphicsQueueIndex(graphicsQueueIndex)
{
const Device *pDevice = resourceManager->m_Device;
const vk::CommandPoolCreateInfo poolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = transferQueueIndex,
};
AbortIfFailedM(pDevice->m_Device.createCommandPool(&poolCreateInfo, nullptr, &m_CommandPool),
"Transfer command pool creation failed.");
const vk::CommandBufferAllocateInfo commandBufferAllocateInfo = {
.commandPool = m_CommandPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailed(pDevice->m_Device.allocateCommandBuffers(&commandBufferAllocateInfo, &m_CommandBuffer));
}
ModelLoader::~ModelLoader()
{
if (m_ResourceManager)
{
m_ResourceManager->m_Device->m_Device.destroy(m_CommandPool, nullptr);
}
}
ModelLoader::ModelLoader(ModelLoader &&other) noexcept
: m_ResourceManager(Take(other.m_ResourceManager))
, m_CommandPool(other.m_CommandPool)
, m_CommandBuffer(other.m_CommandBuffer)
, m_TransferQueue(other.m_TransferQueue)
, m_TransferQueueIndex(other.m_TransferQueueIndex)
, m_GraphicsQueueIndex(other.m_GraphicsQueueIndex)
{
}
ModelLoader &
ModelLoader::operator=(ModelLoader &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = Take(other.m_ResourceManager);
m_CommandPool = other.m_CommandPool;
m_CommandBuffer = other.m_CommandBuffer;
m_TransferQueue = other.m_TransferQueue;
m_TransferQueueIndex = other.m_TransferQueueIndex;
m_GraphicsQueueIndex = other.m_GraphicsQueueIndex;
return *this;
}