// Blaze/Blaze/Source/ModelLoader.cpp
#include "ModelLoader.h"
#include <algorithm>
#include <cstring>
#include <memory_resource>
#include <optional>
#include <span>
#include <string_view>
#include <vector>
#include <DirectXMath.h>
#include <SDL3/SDL_log.h>
#include <cgltf.h>
#include <stb_image.h>
#include "EntityManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "MacroUtils.h"
namespace Blaze
{
std::optional<TextureID> LoadTexture(
RenderDevice* render_device, VkSampler sampler, cgltf_image const& image, bool const linear )
{
byte* data;
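// cgltf only fills buffer_view->data when an extension (e.g. EXT_meshopt_compression)
// decompressed the view; otherwise the bytes live in the parent buffer at the view's offset.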
if ( image.buffer_view->data )
{
data = static_cast<byte*>( image.buffer_view->data );
}
else
{
data = static_cast<byte*>( image.buffer_view->buffer->data ) + image.buffer_view->offset;
}
size_t size = image.buffer_view->size;
uint32_t width;
uint32_t height;
uint32_t num_channels = 4;
stbi_uc* texture_data;
{
int w;
int h;
int nc;
int n_req_channels = static_cast<int>( num_channels );
texture_data = stbi_load_from_memory(
reinterpret_cast<stbi_uc const*>( data ), static_cast<int>( size ), &w, &h, &nc, n_req_channels );
if ( not texture_data )
{
return std::nullopt;
}
// stbi only fills the out-params on success, so inspect them after the null check.
ASSERT( nc <= n_req_channels );
width = static_cast<uint32_t>( w );
height = static_cast<uint32_t>( h );
}
TextureID texture = render_device->textureManager->CreateTexture(
{ width, height, 1 }, sampler, linear ? VK_FORMAT_R8G8B8A8_UNORM : VK_FORMAT_R8G8B8A8_SRGB );
if ( not texture )
{
return std::nullopt;
}
VkImage texture_image = render_device->textureManager->FetchImage( texture ).value();
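// Upload path: decode with stb_image on the CPU, copy the pixels into a
// host-visible staging buffer, record a buffer-to-image copy into mip 0, then
// build the rest of the mip chain on the GPU with vkCmdBlitImage2.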
// Staging Buffer Create
VkBuffer staging_buffer;
VmaAllocation staging_allocation;
{
VkBufferCreateInfo const staging_buffer_create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = static_cast<VkDeviceSize>( width ) * height * num_channels * sizeof( texture_data[0] ),
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr staging_allocation_create_info = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocation_info;
VK_CHECK( vmaCreateBuffer(
render_device->gpuAllocator,
&staging_buffer_create_info,
&staging_allocation_create_info,
&staging_buffer,
&staging_allocation,
&allocation_info ) );
// VMA_ALLOCATION_CREATE_MAPPED_BIT guarantees a persistent mapping when the allocation succeeds.
ASSERT( allocation_info.pMappedData );
memcpy( allocation_info.pMappedData, texture_data, staging_buffer_create_info.size );
}
// The pixels now live in the staging buffer; release the CPU-side copy.
stbi_image_free( texture_data );
// Staging -> Texture transfer
{
Frame& frame_in_use = render_device->frames[render_device->frameIndex];
// The fence was signaled by this frame's last submit, so the wait returns immediately.
VK_CHECK( vkWaitForFences( render_device->device, 1, &frame_in_use.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
// Reset Frame
VK_CHECK( vkResetFences( render_device->device, 1, &frame_in_use.frameReadyToReuse ) );
VK_CHECK( vkResetCommandPool( render_device->device, frame_in_use.commandPool, 0 ) );
VkCommandBufferBeginInfo constexpr begin_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
.pInheritanceInfo = nullptr,
};
uint32_t mip_levels = TextureManager::CalculateRequiredMipLevels( width, height, 1 );
// The ready-barrier below splits the chain into [0, mip_levels - 2] and the last
// level; that split needs at least two levels or its levelCount would be zero.
ASSERT( mip_levels >= 2 );
VkImageSubresourceRange const subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mip_levels,
.baseArrayLayer = 0,
.layerCount = 1,
};
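// One transition covers every mip level, so both the buffer copy (mip 0) and the
// later blit writes (mips 1..N-1) find the image already in TRANSFER_DST_OPTIMAL.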
VkImageMemoryBarrier2 const creation_to_transfer_image_barrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT,
.srcAccessMask = VK_ACCESS_2_NONE,
.dstStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = subresource_range,
};
VkDependencyInfo const creation_to_transfer_dependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &creation_to_transfer_image_barrier,
};
VkImageSubresourceRange all_but_last_mip_subresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mip_levels - 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageSubresourceRange last_mip_subresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = mip_levels - 1,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageMemoryBarrier2 transfer_to_ready_image_barriers[] = {
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = all_but_last_mip_subresource,
},
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = last_mip_subresource,
}
};
VkDependencyInfo const transfer_to_ready_dependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = _countof( transfer_to_ready_image_barriers ),
.pImageMemoryBarriers = transfer_to_ready_image_barriers,
};
constexpr VkImageSubresourceRange mip_level_subresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
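// Per-iteration mip barriers: the first flips the just-written level from
// TRANSFER_DST to TRANSFER_SRC so the next blit can read it; the second performs
// no layout change (equal old/new layouts are a no-op transition under
// synchronization2) and only extends the dependency chain so the initial layout
// transition is ordered before the blit's writes to the destination level.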
VkImageMemoryBarrier2 prepare_next_mip_level_barriers[] = {
// prepareNextMipLevelSrcImageBarrier
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = mip_level_subresource,
},
// prepareNextMipLevelDstImageBarrier
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = mip_level_subresource,
},
};
VkDependencyInfo const prepare_next_mip_level_dependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = _countof( prepare_next_mip_level_barriers ),
.pImageMemoryBarriers = prepare_next_mip_level_barriers,
};
VK_CHECK( vkBeginCommandBuffer( frame_in_use.commandBuffer, &begin_info ) );
{
VkImageSubresourceLayers image_subresource_layers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
// bufferRowLength/bufferImageHeight of 0 mean the buffer data is tightly packed
// to imageExtent, which matches stbi's output, so no explicit pitch is needed.
VkBufferImageCopy copy_region = {
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = image_subresource_layers,
.imageOffset = {0, 0, 0},
.imageExtent = {width, height, 1}
};
// Start
vkCmdPipelineBarrier2( frame_in_use.commandBuffer, &creation_to_transfer_dependency );
// Staging -> Image L0
vkCmdCopyBufferToImage(
frame_in_use.commandBuffer,
staging_buffer,
texture_image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
&copy_region );
// mip_level_subresource already starts at level 0; point the dst barrier at level 1.
prepare_next_mip_level_barriers[1].subresourceRange.baseMipLevel = 1;
int32_t mip_src_width = static_cast<int32_t>( width );
int32_t mip_src_height = static_cast<int32_t>( height );
int32_t mip_dst_width = std::max( mip_src_width / 2, 1 );
int32_t mip_dst_height = std::max( mip_src_height / 2, 1 );
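// Seed the blit region: level 0 at full resolution feeds level 1 at half
// resolution; the loop below re-derives these extents for every further level.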
VkImageSubresourceLayers constexpr mip_subresource_layers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageBlit2 image_blit = {
.sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
.pNext = nullptr,
.srcSubresource = mip_subresource_layers,
.srcOffsets = {{ 0, 0, 0 }, { mip_src_width, mip_src_height, 1 }},
.dstSubresource = mip_subresource_layers,
.dstOffsets = {{ 0, 0, 0 }, { mip_dst_width, mip_dst_height, 1 }},
};
image_blit.dstSubresource.mipLevel = 1;
VkBlitImageInfo2 blit_info = {
.sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
.pNext = nullptr,
.srcImage = texture_image,
.srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.dstImage = texture_image,
.dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.regionCount = 1,
.pRegions = &image_blit,
.filter = VK_FILTER_LINEAR,
};
// Mip generation: each pass transitions level N-1 to TRANSFER_SRC, then blits a
// half-resolution copy of it into level N.
for ( uint32_t dst_mip_level = 1; dst_mip_level < mip_levels; ++dst_mip_level )
{
vkCmdPipelineBarrier2( frame_in_use.commandBuffer, &prepare_next_mip_level_dependency );
vkCmdBlitImage2( frame_in_use.commandBuffer, &blit_info );
// Prep for NEXT iteration
mip_src_width = mip_dst_width;
mip_src_height = mip_dst_height;
mip_dst_width = std::max( mip_src_width / 2, 1 );
mip_dst_height = std::max( mip_src_height / 2, 1 );
image_blit.srcSubresource.mipLevel = dst_mip_level;
image_blit.dstSubresource.mipLevel = dst_mip_level + 1;
image_blit.srcOffsets[1].x = mip_src_width;
image_blit.srcOffsets[1].y = mip_src_height;
image_blit.dstOffsets[1].x = mip_dst_width;
image_blit.dstOffsets[1].y = mip_dst_height;
// Prep current mip level as source
prepare_next_mip_level_barriers[0].subresourceRange.baseMipLevel = dst_mip_level;
prepare_next_mip_level_barriers[1].subresourceRange.baseMipLevel = dst_mip_level + 1;
}
// End
vkCmdPipelineBarrier2( frame_in_use.commandBuffer, &transfer_to_ready_dependency );
}
VK_CHECK( vkEndCommandBuffer( frame_in_use.commandBuffer ) );
VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr,
.waitSemaphoreCount = 0,
.pWaitSemaphores = nullptr,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &frame_in_use.commandBuffer,
.signalSemaphoreCount = 0,
.pSignalSemaphores = nullptr,
};
VK_CHECK( vkQueueSubmit( render_device->directQueue, 1, &submit_info, frame_in_use.frameReadyToReuse ) );
// Leave the fence signaled; resetting it here would make the frame permanently
// unavailable to the main loop.
VK_CHECK( vkWaitForFences( render_device->device, 1, &frame_in_use.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
render_device->frameIndex = ( render_device->frameIndex + 1 ) % render_device->GetNumFrames();
}
vmaDestroyBuffer( render_device->gpuAllocator, staging_buffer, staging_allocation );
return texture;
}
// TODO: Cache materials while loading.
uint32_t ProcessMaterial( RenderDevice* render_device, Model* model, cgltf_material const& material )
{
ASSERT( material.has_pbr_metallic_roughness );
auto const base_color_factor = DirectX::XMFLOAT4{ material.pbr_metallic_roughness.base_color_factor };
auto const emissive_factor = DirectX::XMFLOAT4{
material.emissive_factor[0],
material.emissive_factor[1],
material.emissive_factor[2],
std::max( material.emissive_strength.emissive_strength, 1.0f ),
};
VkSampler sampler = nullptr;
// Value-initialize so the error paths below never free a garbage handle.
TextureID base_color_texture = {};
TextureID normal_texture = {};
TextureID metal_rough_texture = {};
TextureID emissive_texture = {};
VkSamplerCreateInfo constexpr sampler_create_info = {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.magFilter = VK_FILTER_LINEAR,
.minFilter = VK_FILTER_LINEAR,
.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.mipLodBias = 0.0f,
// NOTE: maxAnisotropy of 1.0 makes anisotropyEnable effectively a no-op; raise it
// toward the device limit to actually get anisotropic filtering.
.anisotropyEnable = true,
.maxAnisotropy = 1.0f,
.compareEnable = false,
.compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = VK_LOD_CLAMP_NONE,
.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
.unnormalizedCoordinates = false,
};
VK_CHECK( vkCreateSampler( render_device->device, &sampler_create_info, nullptr, &sampler ) );
if ( material.pbr_metallic_roughness.base_color_texture.texture )
{
cgltf_image const* base_color_image = material.pbr_metallic_roughness.base_color_texture.texture->image;
auto const base_color_texture_opt = LoadTexture( render_device, sampler, *base_color_image, false );
if ( not base_color_texture_opt )
{
vkDestroySampler( render_device->device, Take( sampler ), nullptr );
return UINT32_MAX;
}
base_color_texture = base_color_texture_opt.value();
}
if ( material.pbr_metallic_roughness.metallic_roughness_texture.texture )
{
cgltf_image const* metal_rough_image = material.pbr_metallic_roughness.metallic_roughness_texture.texture->image;
auto const metal_rough_texture_opt = LoadTexture( render_device, sampler, *metal_rough_image, true );
if ( not metal_rough_texture_opt )
{
vkDestroySampler( render_device->device, Take( sampler ), nullptr );
render_device->textureManager->FreeTexture( &base_color_texture );
return UINT32_MAX;
}
metal_rough_texture = metal_rough_texture_opt.value();
}
if ( material.normal_texture.texture )
{
cgltf_image const* normal_image = material.normal_texture.texture->image;
auto const normal_texture_opt = LoadTexture( render_device, sampler, *normal_image, true );
if ( not normal_texture_opt )
{
vkDestroySampler( render_device->device, Take( sampler ), nullptr );
render_device->textureManager->FreeTexture( &metal_rough_texture );
render_device->textureManager->FreeTexture( &base_color_texture );
return UINT32_MAX;
}
normal_texture = normal_texture_opt.value();
}
if ( material.emissive_texture.texture )
{
cgltf_image const* emissive_image = material.emissive_texture.texture->image;
auto const emissive_texture_opt = LoadTexture( render_device, sampler, *emissive_image, true );
if ( not emissive_texture_opt )
{
vkDestroySampler( render_device->device, Take( sampler ), nullptr );
render_device->textureManager->FreeTexture( &base_color_texture );
render_device->textureManager->FreeTexture( &normal_texture );
render_device->textureManager->FreeTexture( &metal_rough_texture );
return UINT32_MAX;
}
emissive_texture = emissive_texture_opt.value();
}
float const metallic = material.pbr_metallic_roughness.metallic_factor;
float const roughness = material.pbr_metallic_roughness.roughness_factor;
uint32_t const material_idx = static_cast<uint32_t>( model->materials.size() );
model->materials.push_back( {
sampler,
base_color_factor,
emissive_factor,
base_color_texture,
normal_texture,
metal_rough_texture,
emissive_texture,
roughness,
metallic,
} );
return material_idx;
}
void LoadAttribute(
std::vector<Vertex>* vertices,
int32_t const vertex_start,
std::vector<float>* scratch,
cgltf_attribute const& attribute,
size_t const stride,
size_t const offset,
size_t const components )
{
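// cgltf unpacks the accessor into a flat float array, then the loop scatters
// those floats into the interleaved Vertex array at the member's offset,
// stepping by sizeof(Vertex) per vertex.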
size_t const float_count = cgltf_accessor_unpack_floats( attribute.data, nullptr, 0 );
ASSERT( float_count % components == 0 );
scratch->resize( float_count );
cgltf_accessor_unpack_floats( attribute.data, scratch->data(), scratch->size() );
// The first attribute of a primitive grows the array; later attributes of the
// same primitive resize to the same size, which is a no-op.
vertices->resize( vertex_start + float_count / components );
byte* write_ptr = reinterpret_cast<byte*>( vertices->data() + vertex_start ) + offset;
float const* read_ptr = scratch->data();
for ( size_t i = vertex_start; i < vertices->size(); ++i )
{
memcpy( write_ptr, read_ptr, components * sizeof( float ) );
read_ptr += components;
write_ptr += stride;
}
scratch->clear();
}
ModelMesh ProcessMesh(
RenderDevice* render_device,
Model* model,
std::vector<Vertex>* vertices,
std::vector<uint32_t>* indices,
cgltf_mesh const& mesh )
{
using namespace std::string_view_literals;
uint32_t const primitive_start = static_cast<uint32_t>( model->primitives.size() );
uint32_t const primitive_count = static_cast<uint32_t>( mesh.primitives_count );
cgltf_primitive const* primitives = mesh.primitives;
for ( uint32_t primitive_index = 0; primitive_index < mesh.primitives_count; ++primitive_index )
{
// VertexStart is per-primitive
int32_t const vertex_start = static_cast<int32_t>( vertices->size() );
cgltf_primitive const& primitive = primitives[primitive_index];
ASSERT( primitive.type == cgltf_primitive_type_triangles );
// Index Buffer
size_t const index_start = indices->size();
size_t const index_count = cgltf_accessor_unpack_indices( primitive.indices, nullptr, sizeof indices->at( 0 ), 0 );
ASSERT( index_count > 0 );
indices->resize( index_start + index_count );
cgltf_accessor_unpack_indices(
primitive.indices, indices->data() + index_start, sizeof indices->at( 0 ), index_count );
// Material
uint32_t material_idx = UINT32_MAX;
if ( primitive.material )
{
material_idx = ProcessMaterial( render_device, model, *primitive.material );
}
model->primitives.push_back( Primitive{
.indexStart = static_cast<uint32_t>( index_start ),
.indexCount = static_cast<uint32_t>( index_count ),
.material = material_idx,
.vertexOffset = vertex_start,
} );
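// Every primitive shares one big vertex/index buffer; vertexOffset is applied at
// draw time (vkCmdDrawIndexed's vertexOffset parameter), so the indices can stay
// exactly as authored in the glTF.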
std::vector<float> scratch;
cgltf_attribute const* attributes = primitive.attributes;
for ( uint32_t attrib_index = 0; attrib_index < primitive.attributes_count; ++attrib_index )
{
if ( "POSITION"sv == attributes[attrib_index].name )
{
cgltf_attribute const& position_attr = attributes[attrib_index];
ASSERT( position_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( position_attr.data->type == cgltf_type_vec3 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, position );
size_t constexpr components = 3;
LoadAttribute( vertices, vertex_start, &scratch, position_attr, stride, offset, components );
}
if ( "NORMAL"sv == attributes[attrib_index].name )
{
cgltf_attribute const& normal_attr = attributes[attrib_index];
ASSERT( normal_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( normal_attr.data->type == cgltf_type_vec3 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, normal );
size_t constexpr components = 3;
LoadAttribute( vertices, vertex_start, &scratch, normal_attr, stride, offset, components );
}
if ( "TANGENT"sv == attributes[attrib_index].name )
{
cgltf_attribute const& tangent_attr = attributes[attrib_index];
ASSERT( tangent_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( tangent_attr.data->type == cgltf_type_vec4 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, tangent );
size_t constexpr components = 4;
LoadAttribute( vertices, vertex_start, &scratch, tangent_attr, stride, offset, components );
}
if ( "TEXCOORD_0"sv == attributes[attrib_index].name )
{
cgltf_attribute const& tex_coord_attr = attributes[attrib_index];
ASSERT( tex_coord_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( tex_coord_attr.data->type == cgltf_type_vec2 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, texCoord0 );
size_t constexpr components = 2;
LoadAttribute( vertices, vertex_start, &scratch, tex_coord_attr, stride, offset, components );
}
if ( "TEXCOORD_1"sv == attributes[attrib_index].name )
{
cgltf_attribute const& tex_coord_attr = attributes[attrib_index];
ASSERT( tex_coord_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( tex_coord_attr.data->type == cgltf_type_vec2 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, texCoord1 );
size_t constexpr components = 2;
LoadAttribute( vertices, vertex_start, &scratch, tex_coord_attr, stride, offset, components );
}
if ( "COLOR_0"sv == attributes[attrib_index].name )
{
cgltf_attribute const& color_attr = attributes[attrib_index];
ASSERT( color_attr.data->component_type == cgltf_component_type_r_32f );
size_t constexpr stride = sizeof( Vertex );
// COLOR_0 must land in its own member rather than aliasing texCoord1; this
// assumes Vertex has a `color` member with room for four floats.
size_t constexpr offset = offsetof( Vertex, color );
size_t components = 3;
switch ( color_attr.data->type )
{
case cgltf_type_vec3:
components = 3;
break;
case cgltf_type_vec4:
components = 4;
break;
default:
UNREACHABLE;
}
LoadAttribute( vertices, vertex_start, &scratch, color_attr, stride, offset, components );
}
// TODO: Grab other attributes.
}
}
return { primitive_start, primitive_count };
}
Entity* ProcessNode(
RenderDevice* render_device,
EntityManager* entity_manager,
Model* model,
std::vector<Vertex>* vertices,
std::vector<uint32_t>* indices,
cgltf_node const& node )
{
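// A glTF node stores either a single matrix or separate TRS components;
// decompose the matrix case so the entity system only ever sees TRS.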
DirectX::XMVECTOR translation;
DirectX::XMVECTOR rotation;
DirectX::XMVECTOR scale;
if ( node.has_matrix )
{
auto const mat = DirectX::XMMATRIX{ node.matrix };
// Keep the decompose outside ASSERT so it still executes when asserts compile out.
[[maybe_unused]] bool const decomposed = DirectX::XMMatrixDecompose( &scale, &rotation, &translation, mat );
ASSERT( decomposed );
}
else
{
translation = node.has_translation
? DirectX::XMVectorSet( node.translation[0], node.translation[1], node.translation[2], 1.0f )
: DirectX::XMVectorZero();
rotation = node.has_rotation
? DirectX::XMVectorSet( node.rotation[0], node.rotation[1], node.rotation[2], node.rotation[3] )
: DirectX::XMQuaternionIdentity();
scale = node.has_scale ? DirectX::XMVectorSet( node.scale[0], node.scale[1], node.scale[2], 1.0f )
: DirectX::XMVectorSplatOne();
}
Entity* entity = entity_manager->CreateEntity( {
.translation = translation,
.rotation = rotation,
.scale = scale,
} );
if ( node.mesh )
{
entity->modelMesh = ProcessMesh( render_device, model, vertices, indices, *node.mesh );
}
for ( uint32_t child_idx = 0; child_idx < node.children_count; ++child_idx )
{
entity->AddChild(
ProcessNode( render_device, entity_manager, model, vertices, indices, *node.children[child_idx] ) );
}
return entity;
}
Entity* LoadModel( Blaze::RenderDevice* render_device, EntityManager* entity_manager, char const* filename )
{
cgltf_data* gltf_model = nullptr;
cgltf_options options = {};
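// cgltf loads in three phases: parse the JSON/GLB, validate the structure, then
// pull in external or embedded buffer data.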
cgltf_result result = cgltf_parse_file( &options, filename, &gltf_model );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s failed to load", filename );
cgltf_free( gltf_model );
return nullptr;
}
result = cgltf_validate( gltf_model );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s is invalid.", filename );
cgltf_free( gltf_model );
return nullptr;
}
result = cgltf_load_buffers( &options, gltf_model, filename );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s buffers failed to load.", filename );
cgltf_free( gltf_model );
return nullptr;
}
Entity* entity = entity_manager->CreateEntity( {
.translation = DirectX::XMVectorZero(),
.rotation = DirectX::XMQuaternionIdentity(),
.scale = DirectX::XMVectorSplatOne(),
} );
// Output data
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
cgltf_scene const* current_scene = gltf_model->scene;
if ( not current_scene )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s has no default scene.", filename );
cgltf_free( gltf_model );
return nullptr;
}
for ( uint32_t node_idx = 0; node_idx < current_scene->nodes_count; ++node_idx )
{
entity->AddChild( ProcessNode(
render_device, entity_manager, &entity->model, &vertices, &indices, *current_scene->nodes[node_idx] ) );
}
entity->model.vertexBuffer = render_device->bufferManager->CreateVertexBuffer( vertices.size() * sizeof vertices[0] );
if ( not entity->model.vertexBuffer )
{
cgltf_free( gltf_model );
return nullptr;
}
render_device->bufferManager->WriteToBuffer( entity->model.vertexBuffer, vertices );
entity->model.indexBuffer = render_device->bufferManager->CreateIndexBuffer( indices.size() * sizeof indices[0] );
if ( not entity->model.indexBuffer )
{
cgltf_free( gltf_model );
return nullptr;
}
render_device->bufferManager->WriteToBuffer( entity->model.indexBuffer, std::span{ indices } );
cgltf_free( gltf_model );
return entity;
}
} // namespace Blaze