project-aster/samples/03_model_render/model_loader.cpp

// =============================================
// Aster: model_loader.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#define TINYGLTF_NOEXCEPTION
#define JSON_NOEXCEPTION
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "model_loader.h"
#include "buffer.h"
#include "device.h"
#include "helpers.h"
#include "image.h"
#include "render_resource_manager.h"
#include <EASTL/array.h>
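// Helpers that convert tinygltf's std::vector<double> factor arrays (base color,
// emissive, etc.) into the engine's float vector types. An empty input yields a
// zero vector; otherwise the element count must match the target type.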
vec4
VectorToVec4(const std::vector<double> &vec)
{
    if (vec.empty())
    {
        return vec4{0.0f};
    }
    assert(vec.size() == 4);
    return {vec[0], vec[1], vec[2], vec[3]};
}
vec3
VectorToVec3(const std::vector<double> &vec)
{
    if (vec.empty())
    {
        return vec3{0.0f};
    }
    assert(vec.size() == 3);
    return {vec[0], vec[1], vec[2]};
}
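// Uploads one glTF image to the GPU: records a staging-buffer copy into mip 0 of
// an R8G8B8A8_SRGB texture, generates the remaining mip levels with a chain of
// blits, and finally releases the image to the graphics queue family. The caller
// owns submission; this only records into `commandBuffer`.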
TextureHandle
ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, StagingBuffer *stagingBuffer,
                       tinygltf::Image *image) const
{
    assert(image->component == 4);
    Texture texture;
    usize byteSize = image->image.size();
    texture.Init(m_ResourceManager->m_Device, {.width = Cast<u32>(image->width), .height = Cast<u32>(image->height)},
                 vk::Format::eR8G8B8A8Srgb, true, image->name.data());
    stagingBuffer->Init(m_ResourceManager->m_Device, byteSize);
    stagingBuffer->Write(m_ResourceManager->m_Device, 0, byteSize, image->image.data());
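    // Transition every mip level of the fresh image to TRANSFER_DST so both the
    // buffer copy (mip 0) and the later blit destinations are in the right layout.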
    vk::ImageMemoryBarrier imageStartBarrier = {
        .srcAccessMask = vk::AccessFlagBits::eNone,
        .dstAccessMask = vk::AccessFlagBits::eTransferWrite,
        .oldLayout = vk::ImageLayout::eUndefined,
        .newLayout = vk::ImageLayout::eTransferDstOptimal,
        .srcQueueFamilyIndex = vk::QueueFamilyIgnored,
        .dstQueueFamilyIndex = vk::QueueFamilyIgnored,
        .image = texture.m_Image,
        .subresourceRange =
            {
                .aspectMask = vk::ImageAspectFlagBits::eColor,
                .baseMipLevel = 0,
                .levelCount = texture.GetMipLevels(),
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
    };
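    // Reused per mip: once a level has been written, move it to TRANSFER_SRC so it
    // can serve as the blit source for the next, smaller level.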
    vk::ImageMemoryBarrier nextMipBarrier = {
        .srcAccessMask = vk::AccessFlagBits::eTransferWrite,
        .dstAccessMask = vk::AccessFlagBits::eTransferRead,
        .oldLayout = vk::ImageLayout::eTransferDstOptimal,
        .newLayout = vk::ImageLayout::eTransferSrcOptimal,
        .srcQueueFamilyIndex = vk::QueueFamilyIgnored,
        .dstQueueFamilyIndex = vk::QueueFamilyIgnored,
        .image = texture.m_Image,
        .subresourceRange =
            {
                .aspectMask = vk::ImageAspectFlagBits::eColor,
                .baseMipLevel = 0,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
    };
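    // Final barrier: all mips end up in TRANSFER_SRC, so transition the whole chain
    // to SHADER_READ_ONLY and release ownership from the transfer to the graphics
    // queue family. A matching acquire barrier is presumably recorded on the
    // graphics queue elsewhere in the sample.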
    vk::ImageMemoryBarrier imageReadyBarrier = {
        .srcAccessMask = vk::AccessFlagBits::eTransferRead,
        .dstAccessMask = vk::AccessFlagBits::eShaderRead,
        .oldLayout = vk::ImageLayout::eTransferSrcOptimal,
        .newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
        .srcQueueFamilyIndex = m_TransferQueueIndex,
        .dstQueueFamilyIndex = m_GraphicsQueueIndex,
        .image = texture.m_Image,
        .subresourceRange =
            {
                .aspectMask = vk::ImageAspectFlagBits::eColor,
                .baseMipLevel = 0,
                .levelCount = texture.GetMipLevels(),
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
    };
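    // Copy region for the full-resolution pixels: staging buffer -> mip 0 of the texture.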
    vk::BufferImageCopy imageCopy = {
        .bufferOffset = 0,
        .bufferRowLength = Cast<u32>(image->width),
        .bufferImageHeight = Cast<u32>(image->height),
        .imageSubresource =
            {
                .aspectMask = vk::ImageAspectFlagBits::eColor,
                .mipLevel = 0,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        .imageOffset = {},
        .imageExtent = texture.m_Extent,
    };
    commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTopOfPipe, vk::PipelineStageFlagBits::eTransfer, {}, 0,
                                  nullptr, 0, nullptr, 1, &imageStartBarrier);
    commandBuffer.copyBufferToImage(stagingBuffer->m_Buffer, texture.m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
                                    &imageCopy);
    commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eTransfer, {}, 0,
                                  nullptr, 0, nullptr, 1, &nextMipBarrier);
    auto calcNextMip = [](i32 prev) {
        return eastl::max(prev / 2, 1);
    };
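    // Build the mip chain: blit each level into the next (halving each dimension,
    // clamped to 1), then flip the freshly written level to TRANSFER_SRC for the
    // following iteration.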
    i32 prevMipWidth = Cast<i32>(texture.m_Extent.width);
    i32 prevMipHeight = Cast<i32>(texture.m_Extent.height);
    u32 maxPrevMip = texture.GetMipLevels() - 1;
    for (u32 prevMipLevel = 0; prevMipLevel < maxPrevMip; ++prevMipLevel)
    {
        i32 currentMipWidth = calcNextMip(prevMipWidth);
        i32 currentMipHeight = calcNextMip(prevMipHeight);
        u32 currentMipLevel = prevMipLevel + 1;
        vk::ImageBlit blitRegion = {
            .srcSubresource =
                {
                    .aspectMask = vk::ImageAspectFlagBits::eColor,
                    .mipLevel = prevMipLevel,
                    .baseArrayLayer = 0,
                    .layerCount = 1,
                },
            .srcOffsets = std::array{
                vk::Offset3D{0, 0, 0},
                vk::Offset3D{prevMipWidth, prevMipHeight, 1},
            },
            .dstSubresource =
                {
                    .aspectMask = vk::ImageAspectFlagBits::eColor,
                    .mipLevel = currentMipLevel,
                    .baseArrayLayer = 0,
                    .layerCount = 1,
                },
            .dstOffsets = std::array{
                vk::Offset3D{0, 0, 0},
                vk::Offset3D{currentMipWidth, currentMipHeight, 1},
            },
        };
        nextMipBarrier.subresourceRange.baseMipLevel = currentMipLevel;
        commandBuffer.blitImage(texture.m_Image, vk::ImageLayout::eTransferSrcOptimal, texture.m_Image,
                                vk::ImageLayout::eTransferDstOptimal, 1, &blitRegion, vk::Filter::eLinear);
        commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eTransfer, {}, 0,
                                      nullptr, 0, nullptr, 1, &nextMipBarrier);
        prevMipHeight = currentMipHeight;
        prevMipWidth = currentMipWidth;
    }
    commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eFragmentShader, {},
                                  0, nullptr, 0, nullptr, 1, &imageReadyBarrier);
    return m_ResourceManager->Commit(&texture);
}
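// Loads a .gltf/.glb file and uploads its images, materials and geometry through
// the transfer queue, blocking on a fence until the copies complete. A rough usage
// sketch (the surrounding sample is assumed to provide the resource manager, the
// transfer queue and the queue family indices; the path is hypothetical):
//
//     ModelLoader loader{resourceManager, transferQueue, transferQueueIndex, graphicsQueueIndex};
//     Model model = loader.LoadModel("assets/helmet.glb", "helmet", /*batched=*/false);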
Model
ModelLoader::LoadModel(cstr path, cstr name, bool batched)
{
    namespace fs = std::filesystem;
    tinygltf::Model model;
    tinygltf::TinyGLTF loader;
    const Device *pDevice = m_ResourceManager->m_Device;
    const auto fsPath = fs::absolute(path);
    const auto ext = fsPath.extension();
    if (ext == GLTF_ASCII_FILE_EXTENSION)
    {
        std::string err;
        std::string warn;
        if (loader.LoadASCIIFromFile(&model, &err, &warn, fsPath.generic_string()))
        {
            ERROR_IF(!err.empty(), "{}", err)
            ELSE_IF_WARN(!warn.empty(), "{}", warn);
        }
    }
    if (ext == GLTF_BINARY_FILE_EXTENSION)
    {
        std::string err;
        std::string warn;
        if (loader.LoadBinaryFromFile(&model, &err, &warn, fsPath.generic_string()))
        {
            ERROR_IF(!err.empty(), "{}", err)
            ELSE_IF_WARN(!warn.empty(), "{}", warn);
        }
    }
    {
        vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
        AbortIfFailed(m_CommandBuffer.begin(&beginInfo));
    }
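    // One staging buffer per image; LoadImage records the upload and mip generation
    // into the shared command buffer and returns a committed texture handle.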
    eastl::vector<StagingBuffer> stagingBuffers;
    eastl::vector<TextureHandle> textureHandles;
    if (!model.images.empty())
    {
        u32 numImages = Cast<u32>(model.images.size());
        stagingBuffers.resize(numImages);
        textureHandles.resize(numImages);
        auto stagingPtr = stagingBuffers.data();
        auto imagePtr = model.images.data();
        for (TextureHandle &handle : textureHandles)
        {
            handle = LoadImage(m_CommandBuffer, stagingPtr++, imagePtr++);
        }
    }
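    // Flatten the glTF materials into the engine's Material struct (factors plus
    // texture handles, with a default handle when a texture slot is unused) and
    // record an upload into a storage buffer.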
    eastl::vector<Material> materials;
    StorageBuffer materialsBuffer;
    BufferHandle materialsHandle;
    if (!model.materials.empty())
    {
        auto getTextureHandle = [&textureHandles](i32 index) -> TextureHandle {
            if (index >= 0)
            {
                return textureHandles[index];
            }
            return {};
        };
        materials.reserve(model.materials.size());
        for (auto &material : model.materials)
        {
            materials.push_back({
                .m_AlbedoFactor = VectorToVec4(material.pbrMetallicRoughness.baseColorFactor),
                .m_EmissionFactor = VectorToVec3(material.emissiveFactor),
                .m_MetalFactor = Cast<f32>(material.pbrMetallicRoughness.metallicFactor),
                .m_RoughFactor = Cast<f32>(material.pbrMetallicRoughness.roughnessFactor),
                .m_AlbedoTex = getTextureHandle(material.pbrMetallicRoughness.baseColorTexture.index),
                .m_NormalTex = getTextureHandle(material.normalTexture.index),
                .m_MetalRoughTex = getTextureHandle(material.pbrMetallicRoughness.metallicRoughnessTexture.index),
                .m_OcclusionTex = getTextureHandle(material.occlusionTexture.index),
                .m_EmissionTex = getTextureHandle(material.emissiveTexture.index),
            });
        }
        usize materialsByteSize = materials.size() * sizeof materials[0];
        materialsBuffer.Init(pDevice, materialsByteSize, false, name);
        materialsHandle = m_ResourceManager->Commit(&materialsBuffer);
        StagingBuffer &materialStaging = stagingBuffers.push_back();
        materialStaging.Init(pDevice, materialsByteSize);
        materialStaging.Write(pDevice, 0, materialsByteSize, materials.data());
        vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = materialsByteSize};
        m_CommandBuffer.copyBuffer(materialStaging.m_Buffer, materialsBuffer.m_Buffer, 1, &bufferCopy);
    }
    // TODO: Mesh reordering based on nodes and/or meshoptimizer
    // TODO: Support scenes
    eastl::vector<vec4> vertexPositions;
    eastl::vector<vec4> normalVectors;
    eastl::vector<vec2> texCoord0;
    eastl::vector<u32> indices;
    eastl::vector<MeshPrimitive> meshPrimitives;
    meshPrimitives.reserve(model.meshes.size());
    u32 vertexOffset = 0;
    i32 normalOffset = 0;
    i32 texCoord0Offset = 0;
    u32 indexOffset = 0;
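    // Flatten every primitive of every mesh into shared position/normal/UV0/index
    // streams. Positions and normals are widened to vec4, indices to u32, and each
    // primitive remembers its offsets into the shared streams.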
    for (auto &mesh : model.meshes)
    {
        for (auto &prim : mesh.primitives)
        {
            u32 vertexCount = 0;
            u32 indexCount = 0;
            i32 normalCount = 0;
            i32 texCoord0Count = 0;
            assert(prim.attributes.contains(APosition));
            assert(prim.mode == TINYGLTF_MODE_TRIANGLES);
            {
                tinygltf::Accessor *posAccessor = &model.accessors[prim.attributes[APosition]];
                assert(posAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *posBufferView = &model.bufferViews[posAccessor->bufferView];
                tinygltf::Buffer *posBuffer = &model.buffers[posBufferView->buffer];
                usize byteOffset = (posAccessor->byteOffset + posBufferView->byteOffset);
                vertexCount = Cast<u32>(posAccessor->count);
                vertexPositions.reserve(vertexOffset + vertexCount);
                if (posAccessor->type == TINYGLTF_TYPE_VEC4)
                {
                    vec4 *data = Recast<vec4 *>(posBuffer->data.data() + byteOffset);
                    vertexPositions.insert(vertexPositions.end(), data, data + vertexCount);
                }
                else if (posAccessor->type == TINYGLTF_TYPE_VEC3)
                {
                    vec3 *data = Recast<vec3 *>(posBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i)
                    {
                        vertexPositions.push_back(vec4(data[i], 1.0f));
                    }
                }
                else if (posAccessor->type == TINYGLTF_TYPE_VEC2)
                {
                    vec2 *data = Recast<vec2 *>(posBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i)
                    {
                        vertexPositions.push_back(vec4(data[i], 0.0f, 1.0f));
                    }
                }
            }
            // Normal Coords
            if (prim.attributes.contains(ANormal))
            {
                tinygltf::Accessor *normAccessor = &model.accessors[prim.attributes[ANormal]];
                assert(normAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *normBufferView = &model.bufferViews[normAccessor->bufferView];
                tinygltf::Buffer *normBuffer = &model.buffers[normBufferView->buffer];
                usize byteOffset = (normAccessor->byteOffset + normBufferView->byteOffset);
                normalCount = Cast<i32>(normAccessor->count);
                normalVectors.reserve(vertexPositions.size());
                if (normAccessor->type == TINYGLTF_TYPE_VEC4)
                {
                    vec4 *data = Recast<vec4 *>(normBuffer->data.data() + byteOffset);
                    normalVectors.insert(normalVectors.end(), data, data + vertexCount);
                }
                else if (normAccessor->type == TINYGLTF_TYPE_VEC3)
                {
                    vec3 *data = Recast<vec3 *>(normBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i)
                    {
                        normalVectors.push_back(vec4(data[i], 1.0f));
                    }
                }
                else if (normAccessor->type == TINYGLTF_TYPE_VEC2)
                {
                    vec2 *data = Recast<vec2 *>(normBuffer->data.data() + byteOffset);
                    for (u32 i = 0; i < vertexCount; ++i)
                    {
                        normalVectors.push_back(vec4(data[i], 0.0f, 1.0f));
                    }
                }
            }
            // UV0
            if (prim.attributes.contains(ATexCoord0))
            {
                tinygltf::Accessor *uvAccessor = &model.accessors[prim.attributes[ATexCoord0]];
                assert(uvAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *uvBufferView = &model.bufferViews[uvAccessor->bufferView];
                tinygltf::Buffer *uvBuffer = &model.buffers[uvBufferView->buffer];
                usize byteOffset = (uvAccessor->byteOffset + uvBufferView->byteOffset);
                texCoord0Count = Cast<i32>(uvAccessor->count);
                texCoord0.reserve(vertexPositions.size());
                assert(uvAccessor->type == TINYGLTF_TYPE_VEC2 &&
                       uvAccessor->componentType == TINYGLTF_COMPONENT_TYPE_FLOAT);
                {
                    vec2 *data = Recast<vec2 *>(uvBuffer->data.data() + byteOffset);
                    texCoord0.insert(texCoord0.end(), data, data + vertexCount);
                }
            }
            // Indices
            if (prim.indices >= 0)
            {
                tinygltf::Accessor *indexAccessor = &model.accessors[prim.indices];
                assert(indexAccessor->count <= MaxValue<u32>);
                tinygltf::BufferView *indexBufferView = &model.bufferViews[indexAccessor->bufferView];
                tinygltf::Buffer *indexBuffer = &model.buffers[indexBufferView->buffer];
                usize byteOffset = (indexAccessor->byteOffset + indexBufferView->byteOffset);
                indexCount = Cast<u32>(indexAccessor->count);
                indices.reserve(indexOffset + indexCount);
                if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT)
                {
                    u32 *data = Recast<u32 *>(indexBuffer->data.data() + byteOffset);
                    indices.insert(indices.end(), data, data + indexCount);
                }
                else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT)
                {
                    u16 *data = Recast<u16 *>(indexBuffer->data.data() + byteOffset);
                    indices.insert(indices.end(), data, data + indexCount);
                }
                else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE)
                {
                    u8 *data = Recast<u8 *>(indexBuffer->data.data() + byteOffset);
                    indices.insert(indices.end(), data, data + indexCount);
                }
            }
            else
            {
                indexCount = vertexCount;
                indices.reserve(indexOffset + vertexCount);
                for (u32 i = 0; i < indexCount; ++i)
                {
                    indices.push_back(i);
                }
            }
            meshPrimitives.push_back({
                .m_VertexOffset = vertexOffset,
                .m_NormalOffset = normalCount > 0 ? normalOffset : -1,
                .m_TexCoord0Offset = texCoord0Count > 0 ? texCoord0Offset : -1,
                .m_FirstIndex = indexOffset,
                .m_IndexCount = indexCount,
                .m_MaterialIdx = prim.material,
            });
            vertexOffset += vertexCount;
            indexOffset += indexCount;
            texCoord0Offset += texCoord0Count;
            normalOffset += normalCount;
            assert(normalVectors.empty() || normalVectors.size() == vertexPositions.size());
            assert(texCoord0.empty() || texCoord0.size() == vertexPositions.size());
        }
    }
#pragma region Staging / Transfer / Uploads
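    // Create the GPU-side geometry buffers, then record one staging-buffer copy per
    // stream. Normal and UV0 buffers are optional and only created when the model
    // actually provided that data.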
    StorageBuffer positionBuffer;
    positionBuffer.Init(pDevice, vertexPositions.size() * sizeof vertexPositions[0], false);
    BufferHandle positionBufferHandle = m_ResourceManager->Commit(&positionBuffer);
    StorageBuffer normalBuffer;
    BufferHandle normalBufferHandle;
    if (!normalVectors.empty())
    {
        normalBuffer.Init(pDevice, normalVectors.size() * sizeof normalVectors[0], false);
        normalBufferHandle = m_ResourceManager->Commit(&normalBuffer);
    }
    StorageBuffer texCoord0Buffer;
    BufferHandle texCoord0BufferHandle;
    if (!texCoord0.empty())
    {
        texCoord0Buffer.Init(pDevice, texCoord0.size() * sizeof texCoord0[0], false);
        texCoord0BufferHandle = m_ResourceManager->Commit(&texCoord0Buffer);
    }
    IndexBuffer indexBuffer;
    indexBuffer.Init(pDevice, indices.size() * sizeof indices[0]);
    {
        vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0};
        bufferCopy.size = positionBuffer.GetSize();
        StagingBuffer &positionStaging = stagingBuffers.push_back();
        positionStaging.Init(pDevice, bufferCopy.size);
        positionStaging.Write(pDevice, 0, bufferCopy.size, vertexPositions.data());
        m_CommandBuffer.copyBuffer(positionStaging.m_Buffer, positionBuffer.m_Buffer, 1, &bufferCopy);
        if (normalBuffer.IsValid())
        {
            bufferCopy.size = normalBuffer.GetSize();
            StagingBuffer &normalStaging = stagingBuffers.push_back();
            normalStaging.Init(pDevice, bufferCopy.size);
            normalStaging.Write(pDevice, 0, bufferCopy.size, normalVectors.data());
            m_CommandBuffer.copyBuffer(normalStaging.m_Buffer, normalBuffer.m_Buffer, 1, &bufferCopy);
        }
        if (texCoord0Buffer.IsValid())
        {
            bufferCopy.size = texCoord0Buffer.GetSize();
            StagingBuffer &textureStaging = stagingBuffers.push_back();
            textureStaging.Init(pDevice, bufferCopy.size);
            textureStaging.Write(pDevice, 0, bufferCopy.size, texCoord0.data());
            m_CommandBuffer.copyBuffer(textureStaging.m_Buffer, texCoord0Buffer.m_Buffer, 1, &bufferCopy);
        }
        bufferCopy.size = indexBuffer.GetSize();
        StagingBuffer &indexStaging = stagingBuffers.push_back();
        indexStaging.Init(pDevice, bufferCopy.size);
        indexStaging.Write(pDevice, 0, bufferCopy.size, indices.data());
        m_CommandBuffer.copyBuffer(indexStaging.m_Buffer, indexBuffer.m_Buffer, 1, &bufferCopy);
    }
#pragma endregion
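    // Submit the recorded transfers and block on a fence so the staging buffers can
    // be destroyed safely before returning.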
    AbortIfFailed(m_CommandBuffer.end());
    vk::SubmitInfo submitInfo = {
        .waitSemaphoreCount = 0,
        .pWaitDstStageMask = nullptr,
        .commandBufferCount = 1,
        .pCommandBuffers = &m_CommandBuffer,
    };
    vk::Fence fence;
    vk::FenceCreateInfo fenceCreateInfo = {};
    AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
    AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
    // Wait effectively without a timeout; the timeout parameter is 64-bit, and a
    // u32 max here would cap the wait at roughly 4 seconds.
    AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u64>));
    pDevice->m_Device.destroy(fence, nullptr);
    AbortIfFailed(pDevice->m_Device.resetCommandPool(
        m_CommandPool, batched ? vk::CommandPoolResetFlags{} : vk::CommandPoolResetFlagBits::eReleaseResources));
    for (auto &buffer : stagingBuffers)
    {
        buffer.Destroy(pDevice);
    }
    return Model{m_ResourceManager, std::move(textureHandles),
                 materialsHandle, positionBufferHandle, normalBufferHandle,
                 texCoord0BufferHandle, indexBuffer, meshPrimitives};
}
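// Model owns the committed GPU handles it is given; the destructor releases them
// through the resource manager. Moves hand the resources over, and Take appears to
// clear the source's resource-manager pointer, which is the sentinel the destructor
// checks before releasing anything.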
Model::Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles,
             BufferHandle materialsHandle, BufferHandle vertexPosHandle, BufferHandle normalHandle,
             BufferHandle uv0Handle, const IndexBuffer &indexBuffer, const eastl::vector<MeshPrimitive> &meshPrimitives)
    : m_ResourceManager(resourceManager)
    , m_TextureHandles(std::move(textureHandles))
    , m_MaterialsHandle(materialsHandle)
    , m_VertexPositionHandle(vertexPosHandle)
    , m_NormalHandle(normalHandle)
    , m_TexCoord0Handle(uv0Handle)
    , m_IndexBuffer(indexBuffer)
    , m_MeshPrimitives(meshPrimitives)
{
}
Model::Model(Model &&other) noexcept
    : m_ResourceManager(Take(other.m_ResourceManager))
    , m_TextureHandles(std::move(other.m_TextureHandles))
    , m_MaterialsHandle(other.m_MaterialsHandle)
    , m_VertexPositionHandle(other.m_VertexPositionHandle)
    , m_NormalHandle(other.m_NormalHandle)
    , m_TexCoord0Handle(other.m_TexCoord0Handle)
    , m_IndexBuffer(other.m_IndexBuffer)
    , m_MeshPrimitives(std::move(other.m_MeshPrimitives))
{
}
Model &
Model::operator=(Model &&other) noexcept
{
    if (this == &other)
        return *this;
    m_ResourceManager = Take(other.m_ResourceManager);
    m_TextureHandles = std::move(other.m_TextureHandles);
    m_MaterialsHandle = other.m_MaterialsHandle;
    m_VertexPositionHandle = other.m_VertexPositionHandle;
    m_NormalHandle = other.m_NormalHandle;
    m_TexCoord0Handle = other.m_TexCoord0Handle;
    m_IndexBuffer = other.m_IndexBuffer;
    m_MeshPrimitives = std::move(other.m_MeshPrimitives);
    return *this;
}
Model::~Model()
{
    if (!m_ResourceManager)
        return;
    m_IndexBuffer.Destroy(m_ResourceManager->m_Device);
    m_ResourceManager->Release(m_VertexPositionHandle);
    m_ResourceManager->Release(m_NormalHandle);
    m_ResourceManager->Release(m_TexCoord0Handle);
    for (const TextureHandle &handle : m_TextureHandles)
    {
        m_ResourceManager->Release(handle);
    }
    m_ResourceManager->Release(m_MaterialsHandle);
}
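// The loader owns a transient command pool on the transfer queue family and a
// single primary command buffer that every LoadModel call records into.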
ModelLoader::ModelLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
                         u32 graphicsQueueIndex)
    : m_ResourceManager(resourceManager)
    , m_TransferQueue(transferQueue)
    , m_TransferQueueIndex(transferQueueIndex)
    , m_GraphicsQueueIndex(graphicsQueueIndex)
{
    const Device *pDevice = resourceManager->m_Device;
    const vk::CommandPoolCreateInfo poolCreateInfo = {
        .flags = vk::CommandPoolCreateFlagBits::eTransient,
        .queueFamilyIndex = transferQueueIndex,
    };
    AbortIfFailedM(pDevice->m_Device.createCommandPool(&poolCreateInfo, nullptr, &m_CommandPool),
                   "Transfer command pool creation failed.");
    const vk::CommandBufferAllocateInfo commandBufferAllocateInfo = {
        .commandPool = m_CommandPool,
        .level = vk::CommandBufferLevel::ePrimary,
        .commandBufferCount = 1,
    };
    AbortIfFailed(pDevice->m_Device.allocateCommandBuffers(&commandBufferAllocateInfo, &m_CommandBuffer));
}
ModelLoader::~ModelLoader()
{
    m_ResourceManager->m_Device->m_Device.destroy(m_CommandPool, nullptr);
}