Compare commits

...

6 Commits

Author SHA1 Message Date
Anish Bhobe 00d5e1476c Use buffer device address. 2025-06-28 15:44:28 +02:00
Anish Bhobe 9a2a6a3340 Added Model Loading. 2025-06-25 20:16:30 +02:00
Anish Bhobe dfdc0dd6d6 Added BufferManager and Entity Hierarchy. 2025-06-21 16:12:13 +02:00
Anish Bhobe 52d3e63223 Depth Buffers Added. 2025-06-20 19:42:11 +02:00
Anish Bhobe babbe93479 Averaging frame times. 2025-06-18 17:18:58 +02:00
Anish Bhobe 0e55949309 Fully bindless textures. 2025-06-18 17:07:02 +02:00
37 changed files with 2372 additions and 888 deletions

View File

@ -4,6 +4,7 @@ AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveAssignments:
Enabled: true
AcrossEmptyLines: true
AlignCompound: true
AlignFunctionPointers: true
AlignConsecutiveDeclarations:
Enabled: true

2
.gitattributes vendored
View File

@ -3,3 +3,5 @@
*.glb filter=lfs diff=lfs merge=lfs -text
*.hdr filter=lfs diff=lfs merge=lfs -text
*.exr filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.gltf filter=lfs diff=lfs merge=lfs -text

BIN
Assets/Models/Box.glb (Stored with Git LFS) Normal file

Binary file not shown.

BIN
Assets/Models/BoxTextured.glb (Stored with Git LFS) Normal file

Binary file not shown.

BIN
Assets/Models/BoxVertexColors.glb (Stored with Git LFS) Normal file

Binary file not shown.

BIN
Assets/Models/OrientationTest.glb (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -0,0 +1,25 @@
[vk::binding(0, 0)] uniform __DynamicResource<__DynamicResourceKind.Sampler> gTextures[];
public struct RID<T> where T : IOpaqueDescriptor {
private uint internal;
private const static uint INDEX_MASK = 0x0007FFFF;
private const static uint GENERATION_MASK = ~INDEX_MASK;
public property bool hasValue {
get {
return (internal & GENERATION_MASK) > 0;
}
}
public property T value {
get {
// TODO: Check if has value else placeholder.
return gTextures[internal & INDEX_MASK].asOpaqueDescriptor<T>();
}
}
}
public extension <T> T where T: IOpaqueDescriptor {
public typealias RID = RID<T>;
}
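
Note: the RID<T> handle above packs a slot index and a generation counter into one 32-bit value, using the same split as the C++ side (INDEX_MASK = 0x0007FFFF with a 19-bit shift in BufferManager.h further down); a generation of zero is the "empty" state checked by hasValue. A standalone C++ sketch of that encoding, with illustrative helper names:

#include <cassert>
#include <cstdint>

// Mirror of the 19-bit index / 13-bit generation split used by RID<T> and BufferManager.
constexpr uint32_t INDEX_MASK        = 0x0007FFFF;   // low 19 bits: slot index
constexpr uint32_t GENERATION_MASK   = ~INDEX_MASK;  // high 13 bits: generation
constexpr uint32_t GENERATION_OFFSET = 19;

constexpr uint32_t packHandle( uint32_t index, uint32_t generation )
{
    return ( index & INDEX_MASK ) | ( generation << GENERATION_OFFSET );
}

constexpr uint32_t handleIndex( uint32_t handle ) { return handle & INDEX_MASK; }
constexpr uint32_t handleGeneration( uint32_t handle ) { return ( handle & GENERATION_MASK ) >> GENERATION_OFFSET; }

int main()
{
    uint32_t const h = packHandle( 42, 3 );
    assert( handleIndex( h ) == 42 && handleGeneration( h ) == 3 );
    // Generation 0 means "no value", matching RID<T>.hasValue in the shader.
    assert( handleGeneration( packHandle( 42, 0 ) ) == 0 );
}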

View File

@ -1,24 +1,65 @@
import Bindless;
struct VertexOut {
float4 outPosition : SV_Position;
float4 screenPosition : ScreenPosition;
float4 vertexColor : CoarseColor;
float2 texCoord0 : TexCoord0;
float4 outPosition : SV_Position;
float4 worldPosition : WorldPosition;
float4 normal : WorldNormal;
float2 texCoord0 : TexCoord0;
float2 texCoord1 : TexCoord1;
float4 vertexColor0 : VertexColor;
};
struct CameraData {
float4x4 view;
float4x4 proj;
float4 position;
};
struct PointLight {
float3 position;
float range;
float3 color;
float attenuation;
};
struct DirectionalLight {
float3 direction;
float _padding0;
float3 color;
float _padding1;
};
struct LightData {
PointLight* pointLights;
DirectionalLight* dirLights;
uint pointLightCount;
uint dirLightCount;
PointLight getPointLight(uint idx) {
if (idx >= pointLightCount) return pointLights[0];
return pointLights[idx];
}
DirectionalLight getDirectionalLight(uint idx) {
if (idx >= dirLightCount) return dirLights[0];
return dirLights[idx];
}
};
struct PerFrameData {
}
CameraData camera;
LightData lightData;
};
[vk::binding(0, 0)] uniform ConstantBuffer<CameraData> camera;
[vk::binding(0, 1)] uniform Sampler2D texture;
uniform ParameterBlock<PerFrameData> pfd;
struct PerInstanceData {
float4x4 transform;
Sampler2D.RID textureID;
uint _padding;
float metallic;
float roughness;
float4 baseColor;
}
[[vk::push_constant]]
@ -28,23 +69,59 @@ uniform ConstantBuffer<PerInstanceData> pcb;
VertexOut VertexMain(
uint vertexId: SV_VertexID,
float3 position,
float3 color,
float3 normal,
float2 texCoord0,
float2 texCoord1,
float4 vertexColor0,
) {
float4 worldPosition = mul(pcb.transform, float4(position, 1.0f));
VertexOut output;
output.outPosition = mul(camera.proj, mul(camera.view, mul(pcb.transform, float4(position, 1.0f))));
output.screenPosition = mul(camera.proj, mul(camera.view, mul(pcb.transform, float4(position, 1.0f))));
output.vertexColor = float4(color, 1.0f);
output.texCoord0 = texCoord0 * 2.0f;
output.outPosition = mul(pfd.camera.proj, mul(pfd.camera.view, worldPosition));
output.worldPosition = worldPosition;
output.normal = mul(pcb.transform, float4(normalize(normal.rgb), 0.0f));
output.texCoord0 = texCoord0;
output.texCoord1 = texCoord1;
output.vertexColor0 = vertexColor0;
return output;
}
[shader("fragment")]
float4 FragmentMain(
float4 interpolatePosition : ScreenPosition,
float4 interpolatedColors : CoarseColor,
float2 uv0 : TexCoord0,
float4 worldPosition : WorldPosition,
float4 normal : WorldNormal,
float2 uv0 : TexCoord0,
float2 uv1 : TexCoord1,
float4 color : VertexColor,
) : SV_Target0 {
return float4(texture.Sample(uv0).rgb, 1.0f) * interpolatedColors;
float3 diffuse = 0.0f.xxx;
float3 specular = 0.0f.xxx;
for (uint i = 0; i < pfd.lightData.pointLightCount; ++i) {
PointLight pointlight = pfd.lightData.pointLights[i];
let lightPosition = pointlight.position;
let lightDisplace = worldPosition.xyz - lightPosition;
let lightDistance = length(lightDisplace);
let lightDirection = normalize(lightDisplace);
let viewDirection = normalize(worldPosition.xyz - pfd.camera.position.xyz);
let halfWayVector = normalize(-lightDirection + viewDirection);
let attenuation = (1.0f / lightDistance);
let diffuseFactor = pcb.roughness * dot(-lightDirection, normalize(normal.xyz));
diffuse += pointlight.color * diffuseFactor;
let specularFactor = (1.0f - pcb.roughness) * pow(max(dot(halfWayVector, viewDirection), 0.0f), 32.0f) * attenuation;
specular += pointlight.color * specularFactor;
}
if (let texture = pcb.textureID) {
return float4(texture.Sample(uv0).rgb, 1.0f) * pcb.baseColor * color * float4((diffuse + specular), 0.0f);
} else {
return pcb.baseColor * color * float4((diffuse + specular), 0.0f);
}
}
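
Note: the new LightData struct holds raw PointLight* / DirectionalLight* pointers, which resolve through buffer device addresses on the GPU; BufferManager::createStorageBuffer (further down in this diff) creates its buffers with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT and exposes fetchDeviceAddress for exactly this. A hedged sketch of how the CPU side could fill such a pointer, assuming the project's BufferManager API; the CPU-side mirror struct and its field names are illustrative, since the actual MiscData layout is not shown in this diff:

#include <cstdint>
#include "BufferManager.h"   // BufferID, BufferManager (declared later in this diff)

// Hypothetical CPU-side mirror of the shader's LightData; field names are assumptions.
struct LightDataCPU
{
    VkDeviceAddress pointLights;   // read by the shader as PointLight*
    VkDeviceAddress dirLights;     // read by the shader as DirectionalLight*
    uint32_t        pointLightCount;
    uint32_t        dirLightCount;
};

// Resolve a storage buffer's device address and store it where the shader expects a pointer.
inline bool bindPointLightBuffer( BufferManager& buffers, BufferID const& pointLightBuffer, LightDataCPU& out )
{
    auto const address = buffers.fetchDeviceAddress( pointLightBuffer );
    if ( !address ) return false;   // freed, or not created with device-address usage
    out.pointLights = *address;
    return true;
}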

View File

@ -1,7 +1,7 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.13.36105.23 d17.13
VisualStudioVersion = 17.13.36105.23
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Blaze", "Blaze.vcxproj", "{92E725FE-D87B-4FDE-8371-5B2CE60945FD}"
EndProject

View File

@ -119,7 +119,7 @@
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>C:\Users\Eon\source\repos\Blaze\vcpkg_installed\x64-windows\x64-windows\bin;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
@ -144,7 +144,7 @@
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalLibraryDirectories>C:\Users\Eon\source\repos\Blaze\vcpkg_installed\x64-windows\x64-windows\bin;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalLibraryDirectories>%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
@ -161,6 +161,7 @@
<Message Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Compiling %(Filename).slang</Message>
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">%(Filename).spv</Outputs>
</CustomBuild>
<None Include="Assets\Shaders\Bindless.slang" />
<None Include="PLAN.md">
<SubType>
</SubType>
@ -171,22 +172,31 @@
</ItemGroup>
<ItemGroup>
<ClInclude Include="Blaze\AppState.h" />
<ClInclude Include="Blaze\BufferManager.h" />
<ClInclude Include="Blaze\EntityManager.h" />
<ClInclude Include="Blaze\Frame.h" />
<ClInclude Include="Blaze\FreeList.h" />
<ClInclude Include="Blaze\GlobalMemory.h" />
<ClInclude Include="Blaze\MacroUtils.h" />
<ClInclude Include="Blaze\MathUtil.h" />
<ClInclude Include="Blaze\MiscData.h" />
<ClInclude Include="Blaze\ModelLoader.h" />
<ClInclude Include="Blaze\RenderDevice.h" />
<ClInclude Include="Blaze\RID.h" />
<ClInclude Include="Blaze\TextureManager.h" />
<ClInclude Include="Blaze\VulkanHeader.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="Blaze\AppState.cpp" />
<ClCompile Include="Blaze\Blaze.cpp" />
<ClCompile Include="Blaze\BufferManager.cpp" />
<ClCompile Include="Blaze\CgltfImpl.cpp" />
<ClCompile Include="Blaze\EntityManager.cpp" />
<ClCompile Include="Blaze\Frame.cpp" />
<ClCompile Include="Blaze\FreeList.cpp" />
<ClCompile Include="Blaze\GlobalMemory.cpp" />
<ClCompile Include="Blaze\MiscData.cpp" />
<ClCompile Include="Blaze\ModelLoader.cpp" />
<ClCompile Include="Blaze\RenderDevice.cpp" />
<ClCompile Include="Blaze\StbImpl.cpp" />
<ClCompile Include="Blaze\TextureManager.cpp" />

View File

@ -45,6 +45,9 @@
<None Include="vcpkg-configuration.json">
<Filter>Resource Files\Config</Filter>
</None>
<None Include="Assets\Shaders\Bindless.slang">
<Filter>Resource Files\Shader Files</Filter>
</None>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Blaze\AppState.h">
@ -74,6 +77,21 @@
<ClInclude Include="Blaze\TextureManager.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\VulkanHeader.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\BufferManager.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\FreeList.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\RID.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\ModelLoader.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="Blaze\AppState.cpp">
@ -106,6 +124,18 @@
<ClCompile Include="Blaze\TextureManager.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\BufferManager.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\FreeList.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\ModelLoader.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\CgltfImpl.cpp">
<Filter>Source Files\HeaderOnlyImpl</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<Image Include="Assets\Textures\container2.png">

View File

@ -53,7 +53,7 @@ AppState* AppState_Create( GlobalMemory* memory, uint32_t const width, uint32_t
return nullptr;
}
EntityManager* entityManager = EntityManager_Create( memory, renderDevice, 10 );
EntityManager* entityManager = EntityManager_Create( memory, renderDevice, 1000 );
if ( !entityManager )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "EntityManager failed to init" );

View File

@ -3,17 +3,18 @@
#include <array>
#include <cassert>
#include <functional>
#include <limits>
#include <span>
#include <volk.h>
#define SDL_MAIN_USE_CALLBACKS 1
#include <SDL3/SDL.h>
#include <SDL3/SDL_filesystem.h>
#include <SDL3/SDL_main.h>
#include <SDL3/SDL_vulkan.h>
#include "VulkanHeader.h"
#include "AppState.h"
#include "EntityManager.h"
#include "Frame.h"
@ -23,6 +24,8 @@
#include "MiscData.h"
#include "RenderDevice.h"
#include "ModelLoader.h"
constexpr uint32_t WIDTH = 1280;
constexpr uint32_t HEIGHT = 720;
constexpr uint32_t NUM_FRAMES = 3;
@ -41,69 +44,42 @@ SDL_AppResult SDL_AppInit( void** appstate, int, char** )
*appstate = AppState_Create( &Blaze::Global::g_Memory, WIDTH, HEIGHT );
if ( !*appstate ) return SDL_APP_FAILURE;
AppState& appState = *static_cast<AppState*>( *appstate );
AppState& appState = *static_cast<AppState*>( *appstate );
// TODO: Integrate this
Entity const* entity =
LoadModel( appState.renderDevice, appState.entityManager, "Assets/Models/OrientationTest.glb" );
ASSERT( entity );
// Model Setup
// modelTransform[1].position = { -1.0f, 0.0f, 0.0f };
// modelTransform[1].scale = 1.0f;
// modelTransform[1].rotation =
// DirectX::XMQuaternionRotationAxis( DirectX::XMVectorSet( 1.0f, 0.0f, 0.0f, 0.0f ), 0.0f );
// TL----TR
// | \ |
// | \ |
// | \ |
// BL----BR
//
// BL -> BR -> TL
// TL -> BR -> TR
std::array vertices = {
// Bottom Left
Vertex{
.position = { -1.0f, -1.0f, 0.0f },
.color = { 0.0f, 0.0f, 1.0f },
.texCoord0 = { 0.0f, 0.0f },
},
// Bottom Right
Vertex{
.position = { 1.0f, -1.0f, 0.0f },
.color = { 1.0f, 0.0f, 0.0f },
.texCoord0 = { 1.0f, 0.0f },
},
// Top Left
Vertex{
.position = { -1.0f, 1.0f, 0.0f },
.color = { 0.0f, 1.0f, 0.0f },
.texCoord0 = { 0.0f, 1.0f },
},
// Top Right
Vertex{
.position = { 1.0f, 1.0f, 0.0f },
.color = { 1.0f, 1.0f, 0.0f },
.texCoord0 = { 1.0f, 1.0f },
}
std::array pointLight = {
MiscData::PointLight{
.position = { 12.0f, 0.0f, 0.0f },
.range = 12,
.color = { 1.0f, 0.0f, 0.0f },
.attenuation = 1.0f,
},
MiscData::PointLight{
.position = { 0.0f, 12.0f, 0.0f },
.range = 12,
.color = { 0.0f, 1.0f, 0.0f },
.attenuation = 1.0f,
},
MiscData::PointLight{
.position = { 0.0f, 0.0f, -12.0f },
.range = 6,
.color = { 0.0f, 0.0f, 1.0f },
.attenuation = 1.0f,
},
};
Transform modelTransform = {
.position = { 1.0f, 0.0f, 0.0f },
.scale = 1.0f,
.rotation = DirectX::XMQuaternionRotationAxis( DirectX::XMVectorSet( 0.0f, 1.0f, 0.0f, 0.0f ), 0.0f ),
};
appState.miscData->lightData.pointLightCount = static_cast<uint32_t>( pointLight.size() );
for ( int i = -3; i <= 3; ++i )
{
modelTransform.position.x = static_cast<float>( i );
appState.entityManager->createEntity(
modelTransform,
vertices,
"Assets/Textures/wall.jpg",
appState.miscData->descriptorSetLayout[1],
appState.miscData->descriptorPool );
}
appState.renderDevice->bufferManager->writeToBuffer(
appState.miscData->pointLights, std::span{ pointLight.begin(), pointLight.end() } );
memcpy(
appState.miscData->cameraUniformBufferPtr + sizeof( MiscData::CameraData ),
&appState.miscData->lightData,
sizeof appState.miscData->lightData );
return SDL_APP_CONTINUE;
}
@ -128,20 +104,27 @@ SDL_AppResult SDL_AppIterate( void* appstate )
misc.previousCounter = currentCounter;
{
double deltaTimeMs = deltaTime * 1000.0;
double fps = 1.0 / deltaTime;
( void )sprintf_s<256>(
appState.sprintfBuffer, "%.2f fps %.5fms %llu -> %llu", fps, deltaTimeMs, previousCounter, currentCounter );
misc.frameTimeSum -= misc.frameTime[misc.frameTimeWriteHead];
misc.frameTime[misc.frameTimeWriteHead] = deltaTime;
misc.frameTimeSum += deltaTime;
misc.frameTimeWriteHead = ( misc.frameTimeWriteHead + 1 ) % misc.frameTimeEntryCount;
double avgDeltaTime = ( misc.frameTimeSum / misc.frameTimeEntryCount );
double fps = 1.0 / avgDeltaTime;
double avgDeltaTimeMs = 1000.0 * avgDeltaTime;
( void )sprintf_s<256>( appState.sprintfBuffer, "%.2f fps %.2f ms", fps, avgDeltaTimeMs );
SDL_SetWindowTitle( appState.window, appState.sprintfBuffer );
}
for ( Entity& entity : entityManager.iter() )
{
entity.transform().rotation = DirectX::XMQuaternionMultiply(
if ( not entity.isRoot() ) continue;
entity.transform.rotation = DirectX::XMQuaternionMultiply(
DirectX::XMQuaternionRotationAxis(
DirectX::XMVectorSet( 0.0f, 1.0f, 0.0f, 0.0f ),
DirectX::XMConvertToRadians( 60.0f ) * static_cast<float>( deltaTime ) ),
entity.transform().rotation );
entity.transform.rotation );
}
uint32_t currentImageIndex;
@ -174,8 +157,27 @@ SDL_AppResult SDL_AppIterate( void* appstate )
.float32 = { 0.0f, 0.0f, 0.0f, 1.0f },
};
VkClearDepthStencilValue constexpr static DEPTH_STENCIL_CLEAR = {
.depth = 1.0f,
.stencil = 0,
};
VK_CHECK( vkBeginCommandBuffer( cmd, &beginInfo ) );
{
VkRenderingAttachmentInfo const depthAttachmentInfo = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.pNext = nullptr,
.imageView = currentFrame.depthView,
.imageLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
.resolveMode = VK_RESOLVE_MODE_NONE,
.resolveImageView = nullptr,
.resolveImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.clearValue = { .depthStencil = DEPTH_STENCIL_CLEAR },
};
VkRenderingAttachmentInfo const attachmentInfo = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.pNext = nullptr,
@ -198,7 +200,7 @@ SDL_AppResult SDL_AppIterate( void* appstate )
.viewMask = 0,
.colorAttachmentCount = 1,
.pColorAttachments = &attachmentInfo,
.pDepthAttachment = nullptr,
.pDepthAttachment = &depthAttachmentInfo,
.pStencilAttachment = nullptr,
};
@ -222,35 +224,90 @@ SDL_AppResult SDL_AppIterate( void* appstate )
// Render Something?
vkCmdBindPipeline( cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, misc.meshPipeline );
for ( Entity const& entity : entityManager.iter() )
vkCmdBindDescriptorSets(
cmd,
VK_PIPELINE_BIND_POINT_GRAPHICS,
misc.pipelineLayout,
0,
1,
&renderDevice.textureManager->descriptorSet(),
0,
nullptr );
vkCmdBindDescriptorSets(
cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, misc.pipelineLayout, 1, 1, &misc.descriptorSet, 0, nullptr );
std::function<void( Entity const&, DirectX::XMMATRIX const&, Model const* )> drawEntity =
[&]( Entity const& entity, DirectX::XMMATRIX const& parent, Model const* current )
{
VkDeviceSize constexpr offset = 0;
vkCmdBindVertexBuffers( cmd, 0, 1, &entity.mesh().vertexBuffer, &offset );
vkCmdBindDescriptorSets(
cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, misc.pipelineLayout, 0, 1, &misc.descriptorSet, 0, nullptr );
vkCmdBindDescriptorSets(
cmd,
VK_PIPELINE_BIND_POINT_GRAPHICS,
misc.pipelineLayout,
1,
1,
&entity.material().descriptorSet,
0,
nullptr );
Transform const& localTransform = entity.transform();
Transform const& localTransform = entity.transform;
DirectX::XMMATRIX worldTransform;
{
auto [x, y, z] = localTransform.position;
auto scale = localTransform.scale;
worldTransform = DirectX::XMMatrixScaling( scale, scale, scale ) *
DirectX::XMMatrixRotationQuaternion( localTransform.rotation ) *
DirectX::XMMatrixTranslation( x, y, z );
worldTransform =
DirectX::XMMatrixAffineTransformation(
localTransform.scale, DirectX::XMVectorZero(), localTransform.rotation, localTransform.translation ) *
parent;
}
if ( not entity.model.isNull() )
{
VkBuffer const vertexBuffer = renderDevice.bufferManager->fetchBuffer( entity.model.vertexBuffer ).value();
VkBuffer const indexBuffer = renderDevice.bufferManager->fetchBuffer( entity.model.indexBuffer ).value();
VkDeviceSize constexpr offset = 0;
vkCmdBindVertexBuffers( cmd, 0, 1, &vertexBuffer, &offset );
vkCmdBindIndexBuffer( cmd, indexBuffer, offset, VK_INDEX_TYPE_UINT32 );
}
vkCmdPushConstants(
cmd, misc.pipelineLayout, VK_SHADER_STAGE_ALL_GRAPHICS, 0, sizeof worldTransform, &worldTransform );
vkCmdDraw( cmd, entity.mesh().vertexCount, 1, 0, 0 );
if ( not entity.modelMesh.isNull() )
{
ASSERT( current );
for ( Primitive const& primitive : std::span{ current->primitives.data() + entity.modelMesh.primitiveStart,
entity.modelMesh.primitiveCount } )
{
byte const* materialData = nullptr;
if ( primitive.material != UINT32_MAX )
{
Material const* mat = &current->materials[primitive.material];
materialData = reinterpret_cast<byte const*>( mat );
materialData += Material::GPU_DATA_OFFSET;
}
else
{
materialData = reinterpret_cast<byte const*>( &DEFAULT_MATERIAL );
materialData += Material::GPU_DATA_OFFSET;
}
vkCmdPushConstants(
cmd,
misc.pipelineLayout,
VK_SHADER_STAGE_ALL_GRAPHICS,
sizeof worldTransform,
Material::GPU_DATA_SIZE,
materialData );
vkCmdDrawIndexed( cmd, primitive.indexCount, 1, primitive.indexStart, primitive.vertexOffset, 0 );
}
}
for ( Entity& child : entity.children() )
{
drawEntity( child, worldTransform, entity.model.isNull() ? current : &entity.model );
}
};
for ( Entity const& entity : entityManager.iter() )
{
if ( not entity.isRoot() )
{
continue;
}
drawEntity( entity, DirectX::XMMatrixIdentity(), nullptr );
}
}
vkCmdEndRendering( cmd );
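
Note: earlier in this file the window-title update switched from the instantaneous frame time to a rolling average: frameTimeSum is maintained incrementally by subtracting the sample being overwritten and adding the new one, while frameTimeWriteHead cycles through a fixed-size ring. A standalone sketch of the same scheme (type and member names are illustrative, not the project's):

#include <array>
#include <cstddef>

// O(1)-per-frame moving average over the last N frame times.
template <std::size_t N>
struct FrameTimeAverager
{
    std::array<double, N> samples{};   // zero-initialized, so early averages are biased low
    double                sum  = 0.0;
    std::size_t           head = 0;

    double push( double deltaTime )
    {
        sum -= samples[head];          // drop the sample about to be overwritten
        samples[head] = deltaTime;
        sum += deltaTime;
        head = ( head + 1 ) % N;
        return sum / static_cast<double>( N );   // average frame time in seconds
    }
};

// Usage: double const avg = averager.push( dt ); double const fps = 1.0 / avg;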

337
Blaze/BufferManager.cpp Normal file
View File

@ -0,0 +1,337 @@
#include "BufferManager.h"
#include "GlobalMemory.h"
template struct RID<Buffer>;
void BufferManager::destroyBuffer( Buffer& buf )
{
if ( not buf.buffer ) return;
ASSERT( m_pRenderDevice );
uint32_t const index = buf.index;
uint32_t const innerIndex = index & INDEX_MASK;
uint32_t const generation = ( index & GENERATION_MASK ) >> GENERATION_OFFSET;
RenderDevice const& renderDevice = *m_pRenderDevice;
vmaDestroyBuffer( renderDevice.gpuAllocator, Take( buf.buffer ), Take( buf.allocation ) );
buf.size = 0;
buf.mappedData = nullptr;
buf.index = innerIndex | ( generation + 1 ) << GENERATION_OFFSET;
// NOTE: DO NOT EDIT INNER INDEX.
ASSERT( innerIndex == ( buf.index & INDEX_MASK ) and "Index should not be modified" );
ASSERT( buf.index > index and "Generation should increase." );
m_freeList.pushBack( reinterpret_cast<FreeList::Node*>( &buf ) );
--m_count;
}
Buffer& BufferManager::fetchBufferUnchecked( BufferID const& rid )
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const innerIndex = index & INDEX_MASK;
return m_aBuffers[innerIndex];
}
void BufferManager::writeToBufferImpl( BufferID const& rid, void const* data, size_t const size )
{
ASSERT( isValidID( rid ) );
Buffer const& buffer = fetchBufferUnchecked( rid );
ASSERT( size <= buffer.size );
memcpy( buffer.mappedData, data, size );
}
bool BufferManager::isValidID( BufferID const& rid ) const
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const innerIndex = index & INDEX_MASK;
if ( innerIndex > m_capacity ) return false;
return m_aBuffers[innerIndex].index == index;
}
std::optional<BufferID> BufferManager::createVertexBuffer( size_t const size )
{
if ( m_freeList.empty() )
{
return std::nullopt;
}
Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
++m_count;
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
VkBufferCreateInfo const bufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VkBuffer vertexBuffer;
VmaAllocation vertexBufferAllocation;
VK_CHECK( vmaCreateBuffer(
renderDevice.gpuAllocator,
&bufferCreateInfo,
&allocationCreateInfo,
&vertexBuffer,
&vertexBufferAllocation,
&allocationInfo ) );
// NOTE: bufferSlot preserves index between uses.
uint32_t index = bufferSlot->index;
new ( bufferSlot ) Buffer{
.buffer = vertexBuffer,
.allocation = vertexBufferAllocation,
.mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
.deviceAddress = 0,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create BufferID;
return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
std::optional<BufferID> BufferManager::createIndexBuffer( size_t size )
{
if ( m_freeList.empty() )
{
return std::nullopt;
}
Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
++m_count;
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
VkBufferCreateInfo const bufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VkBuffer indexBuffer;
VmaAllocation indexBufferAllocation;
VK_CHECK( vmaCreateBuffer(
renderDevice.gpuAllocator,
&bufferCreateInfo,
&allocationCreateInfo,
&indexBuffer,
&indexBufferAllocation,
&allocationInfo ) );
// NOTE: bufferSlot preserves index between uses.
uint32_t index = bufferSlot->index;
new ( bufferSlot ) Buffer{
.buffer = indexBuffer,
.allocation = indexBufferAllocation,
.mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
.deviceAddress = 0,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create BufferID;
return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
std::optional<BufferID> BufferManager::createStorageBuffer( size_t size )
{
if ( m_freeList.empty() )
{
return std::nullopt;
}
Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
++m_count;
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
VkBufferCreateInfo const bufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VkBuffer storageBuffer;
VmaAllocation storageBufferAllocation;
VK_CHECK( vmaCreateBuffer(
renderDevice.gpuAllocator,
&bufferCreateInfo,
&allocationCreateInfo,
&storageBuffer,
&storageBufferAllocation,
&allocationInfo ) );
VkBufferDeviceAddressInfo const deviceAddressInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
.pNext = nullptr,
.buffer = storageBuffer,
};
VkDeviceAddress const deviceAddress = vkGetBufferDeviceAddress( renderDevice.device, &deviceAddressInfo );
// NOTE: bufferSlot preserves index between uses.
uint32_t index = bufferSlot->index;
new ( bufferSlot ) Buffer{
.buffer = storageBuffer,
.allocation = storageBufferAllocation,
.mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
.deviceAddress = deviceAddress,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create BufferID;
return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
void BufferManager::freeBuffer( BufferID&& rid )
{
if ( not isValidID( rid ) ) return;
Buffer& buffer = fetchBufferUnchecked( rid );
destroyBuffer( buffer );
auto _ = std::move( rid );
}
std::optional<VkBuffer> BufferManager::fetchBuffer( BufferID const& rid )
{
if ( not isValidID( rid ) ) return std::nullopt;
return fetchBufferUnchecked( rid ).buffer;
}
std::optional<VkDeviceAddress> BufferManager::fetchDeviceAddress( BufferID const& rid )
{
if ( not isValidID( rid ) ) return std::nullopt;
Buffer const& buffer = fetchBufferUnchecked( rid );
if ( buffer.deviceAddress == 0 ) return std::nullopt;
return buffer.deviceAddress;
}
BufferManager::BufferManager( RenderDevice* pRenderDevice, Buffer* aBuffers, uint32_t const capacity )
: m_pRenderDevice{ pRenderDevice }, m_aBuffers{ aBuffers }, m_count{ 0 }, m_capacity{ capacity }
{
uint32_t i = 0;
for ( Buffer& buf : std::span{ m_aBuffers, m_capacity } )
{
// Default Generation is 1
buf.index = i++ | ( 1 << GENERATION_OFFSET );
m_freeList.pushFront( reinterpret_cast<FreeList::Node*>( &buf ) );
}
}
void BufferManager::destroy()
{
#if defined( _DEBUG )
if ( m_count > 0 )
{
SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%u buffers still allocated.", m_count );
}
#endif
while ( not m_freeList.empty() )
{
Buffer* buf = reinterpret_cast<Buffer*>( m_freeList.popFront() );
memset( buf, 0, sizeof *buf );
}
for ( Buffer& buf : std::span{ m_aBuffers, m_count } )
{
destroyBuffer( buf );
}
}
BufferManager::~BufferManager()
{
ASSERT( not m_aBuffers );
}
BufferManager* BufferManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t maxCount )
{
Buffer* buffers = reinterpret_cast<Buffer*>( mem->allocate( maxCount * sizeof( Buffer ), alignof( Buffer ) ) );
if ( not buffers ) return nullptr;
std::byte* allocation = mem->allocate( sizeof( BufferManager ), alignof( BufferManager ) );
if ( not allocation ) return nullptr;
return new ( allocation ) BufferManager{ renderDevice, buffers, maxCount };
}
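
Note: destroyBuffer above bumps the generation bits before returning the slot to the free list, so a stale BufferID fails isValidID and fetchBuffer / fetchDeviceAddress return std::nullopt instead of a dangling handle. A hedged usage sketch against the API declared in BufferManager.h (assumes an already-constructed BufferManager; the function name is illustrative):

#include <utility>
#include <vector>
#include "BufferManager.h"

// Create a storage buffer, upload data, read its device address, then release it.
void exampleBufferLifetime( BufferManager& buffers )
{
    std::vector<float> samples( 256, 1.0f );

    auto id = buffers.createStorageBuffer( samples.size() * sizeof( float ) );
    if ( !id ) return;                                         // free list exhausted

    buffers.writeToBuffer( *id, samples );                     // byte size derived from the range
    auto const address = buffers.fetchDeviceAddress( *id );    // nullopt for vertex/index buffers
    ( void )address;

    buffers.freeBuffer( std::move( *id ) );                    // slot generation is bumped
    // Any BufferID captured before freeBuffer now fails isValidID().
}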

87
Blaze/BufferManager.h Normal file
View File

@ -0,0 +1,87 @@
#pragma once
#include <optional>
#include <span>
#include "FreeList.h"
#include "MacroUtils.h"
#include "RID.h"
#include "RenderDevice.h"
#include "VulkanHeader.h"
struct GlobalMemory;
struct RenderDevice;
struct Buffer
{
VkBuffer buffer;
VmaAllocation allocation;
std::byte* mappedData; // Assume the system has ReBAR/SAM enabled.
VkDeviceAddress deviceAddress;
size_t size;
uint32_t index;
};
static_assert( sizeof( Buffer ) > sizeof( FreeList::Node ) and "Buffer is used intrusively by FreeList" );
static_assert(
offsetof( Buffer, index ) >= sizeof( FreeList::Node ) and "Index should not be overwritten even in invalid state" );
extern template struct RID<Buffer>;
using BufferID = RID<Buffer>;
struct BufferManager
{
private:
constexpr static uint32_t INDEX_MASK = 0x0007FFFF;
constexpr static uint32_t GENERATION_MASK = ~INDEX_MASK;
constexpr static uint32_t GENERATION_OFFSET = 19;
static_assert(
( ( GENERATION_MASK >> GENERATION_OFFSET & 0x1 ) == 0x1 ) and
( ( GENERATION_MASK >> ( GENERATION_OFFSET - 1 ) & 0x1 ) != 0x1 ) and "Checks boundary" );
RenderDevice* m_pRenderDevice;
// Buffer pool
Buffer* m_aBuffers;
uint32_t m_count;
uint32_t m_capacity;
FreeList m_freeList;
void destroyBuffer( Buffer& buf );
Buffer& fetchBufferUnchecked( BufferID const& rid );
void writeToBufferImpl( BufferID const& rid, void const* data, size_t size );
public:
[[nodiscard]] bool isValidID( BufferID const& rid ) const;
std::optional<BufferID> createVertexBuffer( size_t size );
std::optional<BufferID> createIndexBuffer( size_t size );
std::optional<BufferID> createStorageBuffer( size_t size );
void freeBuffer( BufferID&& rid );
DEPRECATE_JULY_2025
std::optional<VkBuffer> fetchBuffer( BufferID const& rid );
std::optional<VkDeviceAddress> fetchDeviceAddress( BufferID const& rid );
void writeToBuffer( BufferID const& rid, std::ranges::contiguous_range auto const& data )
{
writeToBufferImpl(
rid,
std::ranges::data( data ),
std::ranges::size( data ) * sizeof( std::ranges::range_value_t<decltype( data )> ) );
}
//
BufferManager( RenderDevice* pRenderDevice, Buffer* aBuffers, uint32_t capacity );
void destroy();
BufferManager( BufferManager const& other ) = delete;
BufferManager( BufferManager&& other ) noexcept = delete;
BufferManager& operator=( BufferManager const& other ) = delete;
BufferManager& operator=( BufferManager&& other ) noexcept = delete;
~BufferManager();
};
BufferManager* BufferManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t maxCount );
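
Note: the two static_asserts in this header are what make the intrusive free list safe: a freed Buffer slot is reinterpreted as a FreeList::Node, clobbering its first two pointer-sized fields, while index, which must survive reuse to keep the generation counter, sits past the Node region. A standalone sketch of the same layout check, with stand-in types (Slot and Node here are illustrative, not the project's):

#include <cstddef>
#include <cstdint>

struct Node { Node* pNext; Node* pPrev; };

// Field order mirrors Buffer in the diff above; types are simplified stand-ins.
struct Slot
{
    void*    resource;     // clobbered while the slot sits on the free list
    void*    allocation;   // clobbered while the slot sits on the free list
    void*    mapped;
    uint64_t address;
    size_t   size;
    uint32_t index;        // must stay intact so the generation counter survives reuse
};

static_assert( sizeof( Slot ) > sizeof( Node ), "slot is reused intrusively as a free-list node" );
static_assert( offsetof( Slot, index ) >= sizeof( Node ), "index lies outside the clobbered region" );

int main() {}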

3
Blaze/CgltfImpl.cpp Normal file
View File

@ -0,0 +1,3 @@
#define CGLTF_IMPLEMENTATION
#include <cgltf.h>

View File

@ -5,516 +5,143 @@
#include "GlobalMemory.h"
#include "RenderDevice.h"
#include <stb_image.h>
#include "Frame.h"
#include "TextureManager.h"
Entity* EntityManager::createEntity(
Transform const& transform,
std::span<Vertex> const vertices,
const char* textureFile,
VkDescriptorSetLayout layout,
VkDescriptorPool pool )
Entity& EntitySiblingIterable::Iterator::operator++()
{
ASSERT( pRenderDevice );
RenderDevice& renderDevice = *pRenderDevice;
current = current->nextSibling();
return *current;
}
Mesh mesh;
bool EntitySiblingIterable::Iterator::operator==( Iterator const& other ) const
{
return current == other.current;
}
Entity& EntitySiblingIterable::Iterator::operator*() const
{
return *current;
}
EntitySiblingIterable::Iterator EntitySiblingIterable::begin()
{
return { current };
}
EntitySiblingIterable::Iterator EntitySiblingIterable::end()
{
return {};
}
void Entity::setParent( Entity* parent )
{
ASSERT( parent );
if ( m_parent == parent ) return;
removeParent();
// Insert self into parent.
m_parent = parent;
Entity* oldHead = parent->m_firstChild;
if ( oldHead )
{
mesh.vertexCount = static_cast<uint32_t>( vertices.size() );
mesh.vertexBufferSize = static_cast<uint32_t>( vertices.size_bytes() );
// Old head is next after this
this->m_nextSibling = oldHead;
VkBufferCreateInfo const bufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = mesh.vertexBufferSize,
.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VK_CHECK( vmaCreateBuffer(
pRenderDevice->gpuAllocator,
&bufferCreateInfo,
&allocationCreateInfo,
&mesh.vertexBuffer,
&mesh.vertexBufferAllocation,
&allocationInfo ) );
if ( allocationInfo.pMappedData )
{
memcpy( allocationInfo.pMappedData, vertices.data(), vertices.size_bytes() );
}
// This is prev to old head
oldHead->m_prevSibling = this;
}
// We are the head now.
m_parent->m_firstChild = this;
}
void Entity::addChild( Entity* child )
{
child->setParent( this );
}
Material material;
void Entity::removeChild( Entity* child )
{
ASSERT( child );
child->removeParent();
}
void Entity::removeParent()
{
if ( m_parent )
{
VkSampler sampler;
uint32_t width;
uint32_t height;
uint32_t numChannels = 4;
stbi_uc* textureData;
// Replace prev of next with prev of self
if ( m_nextSibling ) m_nextSibling->m_prevSibling = m_prevSibling;
// Replace next of prev with next of self
if ( m_prevSibling )
{
int w;
int h;
int nc;
int requestedChannels = static_cast<int>( numChannels );
textureData = stbi_load( textureFile, &w, &h, &nc, requestedChannels );
ASSERT( nc <= requestedChannels );
if ( not textureData )
{
vmaDestroyBuffer( pRenderDevice->gpuAllocator, Take( mesh.vertexBuffer ), Take( mesh.vertexBufferAllocation ) );
SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%s", stbi_failure_reason() );
return nullptr;
}
width = static_cast<uint32_t>( w );
height = static_cast<uint32_t>( h );
m_prevSibling->m_nextSibling = m_nextSibling;
}
else
{
// We are head of chain
m_parent->m_firstChild = m_nextSibling;
}
auto textureOpt = renderDevice.textureManager->createTexture( { width, height, 1 } );
if ( not textureOpt )
{
vmaDestroyBuffer( pRenderDevice->gpuAllocator, Take( mesh.vertexBuffer ), Take( mesh.vertexBufferAllocation ) );
SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%s", stbi_failure_reason() );
stbi_image_free( textureData );
return nullptr;
}
TextureID texture = textureOpt.value();
VkImage textureImage = renderDevice.textureManager->fetchImage( texture ).value();
VkSamplerCreateInfo constexpr samplerCreateInfo = {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.magFilter = VK_FILTER_LINEAR,
.minFilter = VK_FILTER_LINEAR,
.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.mipLodBias = 0.0,
.anisotropyEnable = true,
.maxAnisotropy = 1.0f,
.compareEnable = false,
.compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = VK_LOD_CLAMP_NONE,
.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
.unnormalizedCoordinates = false,
};
VK_CHECK( vkCreateSampler( renderDevice.device, &samplerCreateInfo, nullptr, &sampler ) );
// Staging Buffer Create
VkBuffer stagingBuffer;
VmaAllocation stagingAllocation;
{
VkBufferCreateInfo const stagingBufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = static_cast<VkDeviceSize>( width ) * height * numChannels * sizeof( textureData[0] ),
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr stagingAllocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VK_CHECK( vmaCreateBuffer(
renderDevice.gpuAllocator,
&stagingBufferCreateInfo,
&stagingAllocationCreateInfo,
&stagingBuffer,
&stagingAllocation,
&allocationInfo ) );
if ( allocationInfo.pMappedData )
{
memcpy( allocationInfo.pMappedData, textureData, stagingBufferCreateInfo.size );
}
}
// All data is copied to stagingBuffer, don't need this.
stbi_image_free( textureData );
// Staging -> Texture transfer
{
Frame& frameInUse = renderDevice.frames[renderDevice.frameIndex];
// This should just pass.
VK_CHECK( vkWaitForFences( renderDevice.device, 1, &frameInUse.frameReadyToReuse, VK_TRUE, INT64_MAX ) );
// Reset Frame
VK_CHECK( vkResetFences( renderDevice.device, 1, &frameInUse.frameReadyToReuse ) );
VK_CHECK( vkResetCommandPool( renderDevice.device, frameInUse.commandPool, 0 ) );
VkCommandBufferBeginInfo constexpr beginInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
.pInheritanceInfo = nullptr,
};
uint32_t mipLevels = TextureManager::calculateRequiredMipLevels( width, height, 1 );
VkImageSubresourceRange const subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageMemoryBarrier2 const creationToTransferImageBarrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT,
.srcAccessMask = VK_ACCESS_2_NONE,
.dstStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = renderDevice.textureManager->fetchImage( texture ).value(),
.subresourceRange = subresourceRange,
};
VkDependencyInfo const creationToTransferDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &creationToTransferImageBarrier,
};
std::array transferToReadyImageBarriers{
// transferToReadyImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipLevels-1,
.baseArrayLayer = 0,
.layerCount = 1,
},
},
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = mipLevels-1,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
}
};
VkDependencyInfo const transferToReadyDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = static_cast<uint32_t>( transferToReadyImageBarriers.size() ),
.pImageMemoryBarriers = transferToReadyImageBarriers.data(),
};
VkImageSubresourceRange const mipLevelSubresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
std::array prepareNextMipLevelBarriers{
// prepareNextMipLevelSrcImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = mipLevelSubresource,
},
// prepareNextMipLevelDstImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = mipLevelSubresource,
}
};
VkDependencyInfo const prepareNextMipLevelDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = static_cast<uint32_t>( prepareNextMipLevelBarriers.size() ),
.pImageMemoryBarriers = prepareNextMipLevelBarriers.data(),
};
vkBeginCommandBuffer( frameInUse.commandBuffer, &beginInfo );
{
VkImageSubresourceLayers imageSubresourceLayers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
// TODO: Ensure `bufferRowLength` and `bufferImageHeight` are not required.
VkBufferImageCopy copyRegion = {
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = imageSubresourceLayers,
.imageOffset = { 0, 0, 0 },
.imageExtent = { width, height, 1 }
};
// Start
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &creationToTransferDependency );
// Staging -> Image L0
vkCmdCopyBufferToImage(
frameInUse.commandBuffer,
stagingBuffer,
textureImage,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
&copyRegion );
prepareNextMipLevelBarriers[0].subresourceRange.baseMipLevel = 0;
prepareNextMipLevelBarriers[1].subresourceRange.baseMipLevel = 1;
int32_t mipSrcWidth = static_cast<int32_t>( width );
int32_t mipSrcHeight = static_cast<int32_t>( height );
int32_t mipDstWidth = std::max( mipSrcWidth / 2, 1 );
int32_t mipDstHeight = std::max( mipSrcHeight / 2, 1 );
VkImageSubresourceLayers constexpr mipSubresourceLayers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageBlit2 imageBlit = {
.sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
.pNext = nullptr,
.srcSubresource = mipSubresourceLayers,
.srcOffsets = { { 0, 0, 0 }, { mipSrcWidth, mipSrcHeight, 1 } },
.dstSubresource = mipSubresourceLayers,
.dstOffsets = { { 0, 0, 0 }, { mipDstWidth, mipDstHeight, 1 } },
};
imageBlit.srcSubresource.mipLevel = 0;
imageBlit.dstSubresource.mipLevel = 1;
imageBlit.srcOffsets[1].x = mipSrcWidth;
imageBlit.srcOffsets[1].y = mipSrcHeight;
imageBlit.dstOffsets[1].x = mipDstWidth;
imageBlit.dstOffsets[1].y = mipDstHeight;
VkBlitImageInfo2 blitInfo = {
.sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
.pNext = nullptr,
.srcImage = textureImage,
.srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.dstImage = textureImage,
.dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.regionCount = 1,
.pRegions = &imageBlit,
.filter = VK_FILTER_LINEAR,
};
// MipMapping
for ( uint32_t dstMipLevel = 1; dstMipLevel < mipLevels; ++dstMipLevel )
{
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &prepareNextMipLevelDependency );
vkCmdBlitImage2( frameInUse.commandBuffer, &blitInfo );
// Prep for NEXT iteration
mipSrcWidth = mipDstWidth;
mipSrcHeight = mipDstHeight;
mipDstWidth = std::max( mipSrcWidth / 2, 1 );
mipDstHeight = std::max( mipSrcHeight / 2, 1 );
imageBlit.srcSubresource.mipLevel = dstMipLevel;
imageBlit.dstSubresource.mipLevel = dstMipLevel + 1;
imageBlit.srcOffsets[1].x = mipSrcWidth;
imageBlit.srcOffsets[1].y = mipSrcHeight;
imageBlit.dstOffsets[1].x = mipDstWidth;
imageBlit.dstOffsets[1].y = mipDstHeight;
// Prep current mip level as source
prepareNextMipLevelBarriers[0].subresourceRange.baseMipLevel = dstMipLevel;
prepareNextMipLevelBarriers[1].subresourceRange.baseMipLevel = dstMipLevel + 1;
}
// End
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &transferToReadyDependency );
}
vkEndCommandBuffer( frameInUse.commandBuffer );
VkSubmitInfo submitInfo = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr,
.waitSemaphoreCount = 0,
.pWaitSemaphores = nullptr,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &frameInUse.commandBuffer,
.signalSemaphoreCount = 0,
.pSignalSemaphores = nullptr,
};
VK_CHECK( vkQueueSubmit( renderDevice.directQueue, 1, &submitInfo, frameInUse.frameReadyToReuse ) );
// Do not reset this. Else, the frame will never be available to the main loop.
VK_CHECK( vkWaitForFences( renderDevice.device, 1, &frameInUse.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
renderDevice.frameIndex = ( renderDevice.frameIndex + 1 ) % renderDevice.getNumFrames();
}
vmaDestroyBuffer( renderDevice.gpuAllocator, stagingBuffer, stagingAllocation );
VkDescriptorSetAllocateInfo const descriptorSetAllocateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
.descriptorPool = pool,
.descriptorSetCount = 1,
.pSetLayouts = &layout,
};
VkDescriptorSet descriptorSet;
VK_CHECK( vkAllocateDescriptorSets( renderDevice.device, &descriptorSetAllocateInfo, &descriptorSet ) );
VkImageView textureView = renderDevice.textureManager->fetchImageView( texture ).value();
VkDescriptorImageInfo const descriptorImageInfo = {
.sampler = sampler,
.imageView = textureView,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkWriteDescriptorSet writeDescriptorSet{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = descriptorSet,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &descriptorImageInfo,
.pBufferInfo = nullptr,
.pTexelBufferView = nullptr,
};
vkUpdateDescriptorSets( renderDevice.device, 1, &writeDescriptorSet, 0, nullptr );
material = { texture, sampler, descriptorSet };
m_nextSibling = nullptr;
m_prevSibling = nullptr;
m_parent = nullptr;
}
}
entities[count++] = Entity( transform, mesh, material );
EntitySiblingIterable Entity::children() const
{
return { m_firstChild };
}
return entities + count;
Entity::Entity( Transform const& transform )
: transform{ transform }
, model{}
, modelMesh{}
, m_parent{ nullptr }
, m_firstChild{ nullptr }
, m_prevSibling{ nullptr }
, m_nextSibling{ nullptr }
, m_flags{ 0 }
{}
Entity* EntityManager::createEntity( Transform const& transform )
{
ASSERT( count < capacity );
Entity& entity = entities[count++];
new ( &entity ) Entity{ transform };
return &entity;
}
void EntityManager::destroyEntity( Entity* entity )
{
ASSERT( entity );
if ( !entity->isInit() ) return;
VkDevice const device = pRenderDevice->device;
VmaAllocator const allocator = pRenderDevice->gpuAllocator;
VkDevice const device = pRenderDevice->device;
vkDestroySampler( device, Take( entity->material().sampler ), nullptr );
if ( not entity->model.isNull() )
{
for ( auto& material : entity->model.materials )
{
vkDestroySampler( device, Take( material.sampler ), nullptr );
pRenderDevice->textureManager->freeTexture( std::move( material.texture ) );
}
pRenderDevice->textureManager->freeTexture( entity->material().texture );
vmaDestroyBuffer( allocator, Take( entity->mesh().vertexBuffer ), Take( entity->mesh().vertexBufferAllocation ) );
pRenderDevice->bufferManager->freeBuffer( std::move( entity->model.vertexBuffer ) );
pRenderDevice->bufferManager->freeBuffer( std::move( entity->model.indexBuffer ) );
entity->model.primitives.clear();
entity->model.materials.clear();
}
// TODO: Leaking descriptor set.
entity->modelMesh = { 0, 0 };
}
void EntityManager::destroy()
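
Note: the entity hierarchy introduced here is an intrusive linked structure: setParent splices the child in as the new head of the parent's sibling chain, removeParent unlinks it, and children() walks the chain through EntitySiblingIterable. A hedged usage sketch against the API declared in EntityManager.h (assumes an EntityManager with free capacity; the function name and identity transform are illustrative):

#include <DirectXMath.h>
#include "EntityManager.h"

// Build a two-level hierarchy and walk the root's children.
void exampleHierarchy( EntityManager& entities )
{
    Transform const identity = {
        .translation = DirectX::XMVectorZero(),
        .rotation    = DirectX::XMQuaternionIdentity(),
        .scale       = DirectX::XMVectorSplatOne(),
    };

    Entity* root   = entities.createEntity( identity );
    Entity* childA = entities.createEntity( identity );
    Entity* childB = entities.createEntity( identity );

    childA->setParent( root );   // childA becomes root's first child
    root->addChild( childB );    // addChild forwards to setParent; childB is now the head

    for ( Entity& child : root->children() )
    {
        // Per the draw loop in Blaze.cpp, a child's world matrix is its local affine
        // transform multiplied by the parent's accumulated matrix.
        ( void )child;
    }
}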

View File

@ -2,94 +2,84 @@
#include <cstdint>
#include <volk.h>
#include <vma/vk_mem_alloc.h>
#include <DirectXMath.h>
#include <span>
#include "VulkanHeader.h"
// TODO: Remove this dependency
#include "BufferManager.h"
#include "ModelLoader.h"
#include "TextureManager.h"
struct Entity;
struct RenderDevice;
struct GlobalMemory;
struct Vertex
{
DirectX::XMFLOAT3 position;
DirectX::XMFLOAT3 color;
DirectX::XMFLOAT2 texCoord0;
};
struct Transform
{
DirectX::XMFLOAT3 position;
float scale;
DirectX::XMVECTOR translation;
DirectX::XMVECTOR rotation;
DirectX::XMVECTOR scale;
};
struct Mesh
struct EntitySiblingIterable
{
VkBuffer vertexBuffer;
VmaAllocation vertexBufferAllocation;
uint32_t vertexBufferSize;
uint32_t vertexCount;
};
Entity* current;
struct Material
{
TextureID texture;
VkSampler sampler; // TODO: Reuse
VkDescriptorSet descriptorSet;
struct Iterator
{
Entity* current = nullptr;
Entity& operator++();
bool operator==( Iterator const& ) const;
Entity& operator*() const;
};
Iterator begin();
Iterator end();
};
struct Entity
{
Transform transform;
Model model;
ModelMesh modelMesh;
private:
Transform m_transform;
Mesh m_mesh;
Material m_material;
Entity* m_parent; // TODO: Switch to EntityIndex.
Entity* m_firstChild;
Entity* m_prevSibling;
Entity* m_nextSibling;
uint64_t m_flags; // FIXME: Wasting space.
public:
[[nodiscard]] Transform& transform()
[[nodiscard]] bool isRoot() const
{
return m_transform;
return not m_parent;
}
[[nodiscard]] Transform const& transform() const
[[nodiscard]] Entity* parent() const
{
return m_transform;
return m_parent;
}
[[nodiscard]] Mesh& mesh()
[[nodiscard]] Entity* nextSibling() const
{
return m_mesh;
return m_nextSibling;
}
[[nodiscard]] Mesh const& mesh() const
{
return m_mesh;
}
void setParent( Entity* parent );
[[nodiscard]] Material& material()
{
return m_material;
}
void addChild( Entity* child );
[[nodiscard]] Material const& material() const
{
return m_material;
}
void removeChild( Entity* child );
[[nodiscard]] bool isInit() const
{
return m_mesh.vertexBuffer or m_material.texture;
}
// Remove self from parent
void removeParent();
Entity( Transform const& transform, Mesh const& mesh, Material const& material )
: m_transform{ transform }, m_mesh{ mesh }, m_material{ material }
{}
[[nodiscard]] EntitySiblingIterable children() const;
explicit Entity( Transform const& transform );
};
struct EntityManager
@ -130,17 +120,11 @@ struct EntityManager
}
// Make Entities return ID, make it a sparse indexing system.
// TODO: Remove the descriptor pool dependency.
Entity* createEntity(
Transform const& transform,
std::span<Vertex> vertices,
const char* textureFile,
VkDescriptorSetLayout layout,
VkDescriptorPool pool );
Entity* createEntity( Transform const& transform );
void destroyEntity( Entity* entity );
void destroyEntity( Entity* entity );
void destroy();
void destroy();
~EntityManager();
};

View File

@ -15,12 +15,18 @@ Frame::Frame(
VkCommandBuffer const commandBuffer,
VkSemaphore const imageAcquiredSemaphore,
VkSemaphore const renderFinishedSemaphore,
VkFence const frameReadyToReuse )
VkFence const frameReadyToReuse,
VkImage const depthImage,
VmaAllocation const depthAllocation,
VkImageView const depthView )
: commandPool{ commandPool }
, commandBuffer{ commandBuffer }
, imageAcquiredSemaphore{ imageAcquiredSemaphore }
, renderFinishedSemaphore{ renderFinishedSemaphore }
, frameReadyToReuse{ frameReadyToReuse }
, depthImage{ depthImage }
, depthAllocation{ depthAllocation }
, depthView{ depthView }
{}
void Frame::destroy( RenderDevice const& renderDevice )
@ -29,6 +35,9 @@ void Frame::destroy( RenderDevice const& renderDevice )
VkDevice const device = renderDevice.device;
vkDestroyImageView( device, Take( depthView ), nullptr );
vmaDestroyImage( renderDevice.gpuAllocator, Take( depthImage ), Take( depthAllocation ) );
vkDestroyCommandPool( device, Take( commandPool ), nullptr );
vkDestroyFence( device, Take( frameReadyToReuse ), nullptr );
vkDestroySemaphore( device, Take( imageAcquiredSemaphore ), nullptr );
@ -41,7 +50,12 @@ Frame::~Frame()
ASSERT( !isInit() );
}
void Frame_Create( Frame* frame, VkDevice const device, uint32_t const directQueueFamilyIndex )
void Frame_Create(
Frame* frame,
VkDevice const device,
VmaAllocator const gpuAllocator,
uint32_t const directQueueFamilyIndex,
VkExtent2D const swapchainExtent )
{
VkCommandPool commandPool;
VkCommandBuffer commandBuffer;
@ -49,6 +63,10 @@ void Frame_Create( Frame* frame, VkDevice const device, uint32_t const directQue
VkSemaphore renderFinishedSemaphore;
VkFence frameReadyToReuse;
VkImage depthImage;
VmaAllocation depthAllocation;
VkImageView depthView;
{
VkCommandPoolCreateInfo const commandPoolCreateInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
@ -83,9 +101,74 @@ void Frame_Create( Frame* frame, VkDevice const device, uint32_t const directQue
VK_CHECK( vkCreateFence( device, &fenceCreateInfo, nullptr, &frameReadyToReuse ) );
}
{
VkImageCreateInfo const depthImageCreateInfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.imageType = VK_IMAGE_TYPE_2D,
.format = VK_FORMAT_D32_SFLOAT,
.extent = { swapchainExtent.width, swapchainExtent.height, 1 },
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED
};
VmaAllocationCreateInfo constexpr depthAllocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_GPU_ONLY,
.requiredFlags = 0,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VK_CHECK( vmaCreateImage(
gpuAllocator, &depthImageCreateInfo, &depthAllocationCreateInfo, &depthImage, &depthAllocation, nullptr ) );
VkImageSubresourceRange constexpr subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkComponentMapping constexpr componentMapping = {
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
};
VkImageViewCreateInfo const imageViewCreateInfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = depthImage,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = depthImageCreateInfo.format,
.components = componentMapping,
.subresourceRange = subresourceRange,
};
VK_CHECK( vkCreateImageView( device, &imageViewCreateInfo, nullptr, &depthView ) );
}
frame->commandPool = commandPool;
frame->commandBuffer = commandBuffer;
frame->imageAcquiredSemaphore = imageAcquiredSemaphore;
frame->renderFinishedSemaphore = renderFinishedSemaphore;
frame->frameReadyToReuse = frameReadyToReuse;
frame->depthImage = depthImage;
frame->depthView = depthView;
frame->depthAllocation = depthAllocation;
}

View File

@ -1,7 +1,8 @@
#pragma once
#include <utility>
#include <volk.h>
#include "VulkanHeader.h"
struct RenderDevice;
@ -13,6 +14,10 @@ struct Frame
VkSemaphore renderFinishedSemaphore;
VkFence frameReadyToReuse;
VkImage depthImage;
VmaAllocation depthAllocation;
VkImageView depthView;
[[nodiscard]] bool isInit() const;
Frame(
@ -20,7 +25,10 @@ struct Frame
VkCommandBuffer commandBuffer,
VkSemaphore imageAcquiredSemaphore,
VkSemaphore renderFinishedSemaphore,
VkFence frameReadyToReuse );
VkFence frameReadyToReuse,
VkImage depthImage,
VmaAllocation depthAllocation,
VkImageView depthView );
void destroy( RenderDevice const& renderDevice );
@ -32,4 +40,9 @@ struct Frame
~Frame();
};
void Frame_Create( Frame* frame, VkDevice device, uint32_t directQueueFamilyIndex );
void Frame_Create(
Frame* frame,
VkDevice device,
VmaAllocator gpuAllocator,
uint32_t directQueueFamilyIndex,
VkExtent2D swapchainExtent );

72
Blaze/FreeList.cpp Normal file
View File

@ -0,0 +1,72 @@
#include "FreeList.h"
FreeList::Iterator& FreeList::Iterator::operator++()
{
pIter = pIter->pNext;
return *this;
}
bool FreeList::Iterator::operator==( Iterator const& other ) const
{
return this->pIter == other.pIter;
}
FreeList::Node& FreeList::Iterator::operator*()
{
return *pIter;
}
FreeList::FreeList() : m_head{ .pNext = &m_tail, .pPrev = nullptr }, m_tail{ .pNext = nullptr, .pPrev = &m_head }
{}
void FreeList::pushBack( Node* pNode )
{
Node* prev = m_tail.pPrev;
// Set prev as previous of pNode
prev->pNext = pNode;
pNode->pPrev = prev;
// Set tail as next of pNode
pNode->pNext = &m_tail;
m_tail.pPrev = pNode;
}
void FreeList::pushFront( Node* pNode )
{
Node* next = m_head.pNext;
// Set next as next of pNode
next->pPrev = pNode;
pNode->pNext = next;
// Set head as prev of pNode
pNode->pPrev = &m_head;
m_head.pNext = pNode;
}
FreeList::Node* FreeList::popFront()
{
ASSERT( not empty() );
Node* element = m_head.pNext;
element->pPrev->pNext = element->pNext;
element->pNext->pPrev = element->pPrev;
return element;
}
bool FreeList::empty() const
{
return m_head.pNext == &m_tail;
}
FreeList::Iterator FreeList::begin()
{
return { m_head.pNext };
}
FreeList::Iterator FreeList::end()
{
return { &m_tail };
}

42
Blaze/FreeList.h Normal file
View File

@ -0,0 +1,42 @@
#pragma once
#include "MacroUtils.h"
struct FreeList
{
struct Node
{
Node* pNext;
Node* pPrev;
};
struct Iterator
{
Node* pIter;
Iterator& operator++();
bool operator==( Iterator const& other ) const;
Node& operator*();
};
private:
Node m_head;
Node m_tail;
public:
FreeList();
void pushBack( Node* pNode );
void pushFront( Node* pNode );
Node* popFront();
[[nodiscard]] bool empty() const;
Iterator begin();
Iterator end();
FreeList( FreeList&& ) = delete;
FreeList( FreeList const& ) = delete;
FreeList& operator=( FreeList const& ) = delete;
FreeList& operator=( FreeList&& ) = delete;
~FreeList() = default;
};
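A minimal usage sketch of the intrusive list (the `Slot` type and `Example` function below are hypothetical; in this change the real payload is `Texture`, whose leading bytes are reused as the node):

#include <cstdint>
#include "FreeList.h"

struct Slot // hypothetical payload; must be at least as large as FreeList::Node
{
	void*    a;
	void*    b;
	uint32_t index;
};
static_assert( sizeof( Slot ) >= sizeof( FreeList::Node ) );

void Example()
{
	Slot     slots[4] = {};
	FreeList freeList;
	// Seed the free list with every slot, reusing slot memory as the node.
	for ( Slot& slot : slots )
	{
		freeList.pushFront( reinterpret_cast<FreeList::Node*>( &slot ) );
	}
	// Acquire a slot...
	Slot* acquired = reinterpret_cast<Slot*>( freeList.popFront() );
	// ...and return it once it is no longer in use.
	freeList.pushBack( reinterpret_cast<FreeList::Node*>( acquired ) );
}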

View File

@ -25,7 +25,7 @@ std::byte* GlobalMemory::allocate( size_t const size )
std::byte* retVal = memory;
memset( retVal, 0, size );
memory += size;
memory += size;
available -= size;
SDL_LogInfo(
SDL_LOG_CATEGORY_SYSTEM,

View File

@ -2,6 +2,8 @@
#include <utility>
using byte = std::byte;
template <std::totally_ordered T>
T Clamp( T const val, T const minVal, T const maxVal )
{

View File

@ -43,15 +43,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
.pImmutableSamplers = nullptr,
};
VkDescriptorSetLayoutBinding constexpr perMaterialDescriptorBinding{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
.pImmutableSamplers = nullptr,
};
@ -62,30 +54,26 @@ bool MiscData::init( RenderDevice const& renderDevice )
.bindingCount = 1,
.pBindings = &perFrameDescriptorBinding,
};
VK_CHECK( vkCreateDescriptorSetLayout(
device, &perFrameDescriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout[0] ) );
VkDescriptorSetLayoutCreateInfo perMaterialDescriptorSetLayoutCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.bindingCount = 1,
.pBindings = &perMaterialDescriptorBinding,
};
VK_CHECK( vkCreateDescriptorSetLayout(
device, &perMaterialDescriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout[1] ) );
VK_CHECK(
vkCreateDescriptorSetLayout( device, &perFrameDescriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout ) );
VkPushConstantRange const pushConstantRange = {
.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS,
.offset = 0,
.size = sizeof( DirectX::XMMATRIX ),
.size = sizeof( DirectX::XMMATRIX ) + Material::GPU_DATA_SIZE,
};
std::array const descriptorSetLayouts = {
renderDevice.textureManager->descriptorLayout(),
descriptorSetLayout,
};
VkPipelineLayoutCreateInfo const pipelineLayoutCreateInfo = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.setLayoutCount = static_cast<uint32_t>( descriptorSetLayout.size() ),
.pSetLayouts = descriptorSetLayout.data(),
.setLayoutCount = static_cast<uint32_t>( descriptorSetLayouts.size() ),
.pSetLayouts = descriptorSetLayouts.data(),
.pushConstantRangeCount = 1,
.pPushConstantRanges = &pushConstantRange,
};
@ -130,7 +118,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.location = 1,
.binding = 0,
.format = VK_FORMAT_R32G32B32_SFLOAT,
.offset = offsetof( Vertex, color ),
.offset = offsetof( Vertex, normal ),
},
VkVertexInputAttributeDescription{
.location = 2,
@ -138,6 +126,18 @@ bool MiscData::init( RenderDevice const& renderDevice )
.format = VK_FORMAT_R32G32_SFLOAT,
.offset = offsetof( Vertex, texCoord0 ),
},
VkVertexInputAttributeDescription{
.location = 3,
.binding = 0,
.format = VK_FORMAT_R32G32_SFLOAT,
.offset = offsetof( Vertex, texCoord1 ),
},
VkVertexInputAttributeDescription{
.location = 4,
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = offsetof( Vertex, color0 ),
},
};
VkPipelineVertexInputStateCreateInfo const vertexInputState = {
@ -154,7 +154,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
.primitiveRestartEnable = VK_FALSE,
};
@ -207,9 +207,9 @@ bool MiscData::init( RenderDevice const& renderDevice )
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.depthTestEnable = VK_FALSE,
.depthWriteEnable = VK_FALSE,
.depthCompareOp = VK_COMPARE_OP_ALWAYS,
.depthTestEnable = VK_TRUE,
.depthWriteEnable = VK_TRUE,
.depthCompareOp = VK_COMPARE_OP_LESS,
.depthBoundsTestEnable = VK_FALSE,
.stencilTestEnable = VK_FALSE,
.front = {},
@ -255,6 +255,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.sType = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO_KHR,
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &renderDevice.swapchainFormat,
.depthAttachmentFormat = VK_FORMAT_D32_SFLOAT,
};
VkGraphicsPipelineCreateInfo const graphicsPipelineCreateInfo = {
@ -288,14 +289,37 @@ bool MiscData::init( RenderDevice const& renderDevice )
// Camera
{
cameraPosition = DirectX::XMVectorSet( 0.0f, 0.0f, -4.0f, 1.0f );
cameraTarget = DirectX::XMVectorSet( 0.0f, 0.0f, 0.0f, 1.0f );
cameraUp = DirectX::XMVectorSet( 0.0f, 1.0f, 0.0f, 1.0f );
cameraData.viewMatrix = DirectX::XMMatrixLookAtLH( cameraPosition, cameraTarget, cameraUp );
cameraData.cameraPosition = DirectX::XMVectorSet( 0.0f, 20.0f, -20.0f, 1.0f );
cameraTarget = DirectX::XMVectorSet( 0.0f, 0.0f, 0.0f, 1.0f );
cameraUp = DirectX::XMVectorSet( 0.0f, 1.0f, 0.0f, 1.0f );
cameraData.viewMatrix = DirectX::XMMatrixLookAtLH( cameraData.cameraPosition, cameraTarget, cameraUp );
cameraData.projectionMatrix =
DirectX::XMMatrixPerspectiveFovLH( DirectX::XMConvertToRadians( 70.0f ), 16.0f / 9.0f, 0.1f, 1000.0f );
cameraUniformBufferSize = sizeof( CameraData );
cameraUniformBufferSize = sizeof( CameraData ) + sizeof( LightData );
}
// Lights
{
auto pointLightsValue = renderDevice.bufferManager->createStorageBuffer( 10 * sizeof( PointLight ) );
if ( !pointLightsValue ) return false;
pointLights = std::move( pointLightsValue.value() );
auto dirLightsValue = renderDevice.bufferManager->createStorageBuffer( 10 * sizeof( DirectionalLight ) );
if ( !dirLightsValue ) return false;
directionalLights = std::move( dirLightsValue.value() );
lightData.pointLights = renderDevice.bufferManager->fetchDeviceAddress( pointLights ).value();
lightData.directionalLights = renderDevice.bufferManager->fetchDeviceAddress( directionalLights ).value();
lightData.dirLightCount = 0;
lightData.pointLightCount = 0;
}
// Uniform Buffer
{
VkBufferCreateInfo const bufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
@ -331,8 +355,9 @@ bool MiscData::init( RenderDevice const& renderDevice )
if ( allocationInfo.pMappedData )
{
memcpy( allocationInfo.pMappedData, &cameraData, sizeof cameraData );
cameraUniformBufferPtr = static_cast<uint8_t*>( allocationInfo.pMappedData );
memcpy( cameraUniformBufferPtr, &cameraData, sizeof cameraData );
memcpy( cameraUniformBufferPtr + sizeof cameraData, &lightData, sizeof lightData );
}
}
@ -364,7 +389,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.pNext = nullptr,
.descriptorPool = descriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &descriptorSetLayout[0],
.pSetLayouts = &descriptorSetLayout,
};
VK_CHECK( vkAllocateDescriptorSets( device, &descriptorSetAllocateInfo, &descriptorSet ) );
@ -372,7 +397,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
VkDescriptorBufferInfo const descriptorBufferInfo = {
.buffer = cameraUniformBuffer,
.offset = 0,
.range = sizeof CameraData,
.range = cameraUniformBufferSize,
};
std::array writeDescriptorSets = {
@ -457,6 +482,12 @@ bool MiscData::init( RenderDevice const& renderDevice )
};
}
// Frame Time
frameTimeEntryCount = 16;
memset( frameTime, 0, frameTimeEntryCount * sizeof( frameTime[0] ) );
frameTimeSum = 0;
frameTimeWriteHead = 0;
return true;
}
@ -467,8 +498,10 @@ void MiscData::destroy( RenderDevice const& renderDevice )
vkDestroyDescriptorPool( device, Take( descriptorPool ), nullptr );
vmaDestroyBuffer( renderDevice.gpuAllocator, Take( cameraUniformBuffer ), Take( cameraUniformBufferAllocation ) );
renderDevice.bufferManager->freeBuffer( std::move( pointLights ) );
renderDevice.bufferManager->freeBuffer( std::move( directionalLights ) );
vkDestroyPipeline( device, Take( meshPipeline ), nullptr );
vkDestroyPipelineLayout( device, Take( pipelineLayout ), nullptr );
vkDestroyDescriptorSetLayout( device, Take( descriptorSetLayout[1] ), nullptr );
vkDestroyDescriptorSetLayout( device, Take( descriptorSetLayout[0] ), nullptr );
vkDestroyDescriptorSetLayout( device, Take( descriptorSetLayout ), nullptr );
}
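The push-constant range now spans the object transform plus Material::GPU_DATA_SIZE bytes; a sketch of how a draw loop could feed it (the `PushDrawConstants` helper and its parameters are assumptions, not part of this change):

#include <cstddef>
#include <DirectXMath.h>
#include "ModelLoader.h" // Material
#include "VulkanHeader.h"

// Sketch only: push the transform, then the material block the shader reads
// as texture id / metallic / roughness / base color.
void PushDrawConstants(
	VkCommandBuffer const    commandBuffer,
	VkPipelineLayout const   pipelineLayout,
	DirectX::XMMATRIX const& worldMatrix,
	Material const&          material )
{
	vkCmdPushConstants(
		commandBuffer,
		pipelineLayout,
		VK_SHADER_STAGE_ALL_GRAPHICS,
		0,
		static_cast<uint32_t>( sizeof( DirectX::XMMATRIX ) ),
		&worldMatrix );
	vkCmdPushConstants(
		commandBuffer,
		pipelineLayout,
		VK_SHADER_STAGE_ALL_GRAPHICS,
		static_cast<uint32_t>( sizeof( DirectX::XMMATRIX ) ),
		static_cast<uint32_t>( Material::GPU_DATA_SIZE ),
		reinterpret_cast<std::byte const*>( &material ) + Material::GPU_DATA_OFFSET );
}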

View File

@ -1,12 +1,13 @@
#pragma once
#include <array>
#include <volk.h>
#include <vma/vk_mem_alloc.h>
#include "VulkanHeader.h"
#include <DirectXMath.h>
#include "BufferManager.h"
struct GlobalMemory;
struct RenderDevice;
@ -16,32 +17,64 @@ struct MiscData
{
DirectX::XMMATRIX viewMatrix;
DirectX::XMMATRIX projectionMatrix;
DirectX::XMVECTOR cameraPosition;
};
uint64_t previousCounter;
struct PointLight
{
DirectX::XMFLOAT3 position;
float range;
DirectX::XMFLOAT3 color;
float attenuation;
};
std::array<VkDescriptorSetLayout, 2> descriptorSetLayout;
VkPipelineLayout pipelineLayout;
VkPipeline meshPipeline;
struct DirectionalLight
{
DirectX::XMFLOAT3 direction;
float _padding0;
DirectX::XMFLOAT3 color;
float _padding1;
};
uint64_t _padding; // TODO: Optimize out?
struct LightData
{
VkDeviceAddress pointLights;
VkDeviceAddress directionalLights;
uint32_t pointLightCount;
uint32_t dirLightCount;
};
DirectX::XMVECTOR cameraPosition;
DirectX::XMVECTOR cameraTarget;
DirectX::XMVECTOR cameraUp;
CameraData cameraData;
VkBuffer cameraUniformBuffer;
VmaAllocation cameraUniformBufferAllocation;
size_t cameraUniformBufferSize;
uint8_t* cameraUniformBufferPtr;
VkDescriptorPool descriptorPool;
VkDescriptorSet descriptorSet;
uint64_t previousCounter;
VkImageMemoryBarrier2 acquireToRenderBarrier;
VkDependencyInfo acquireToRenderDependency;
VkImageMemoryBarrier2 renderToPresentBarrier;
VkDependencyInfo renderToPresentDependency;
VkDescriptorSetLayout descriptorSetLayout;
VkPipelineLayout pipelineLayout;
VkPipeline meshPipeline;
bool init( RenderDevice const& renderDevice );
void destroy( RenderDevice const& renderDevice );
DirectX::XMVECTOR cameraTarget;
DirectX::XMVECTOR cameraUp;
CameraData cameraData;
BufferID pointLights;
BufferID directionalLights;
LightData lightData;
VkBuffer cameraUniformBuffer;
VmaAllocation cameraUniformBufferAllocation;
size_t cameraUniformBufferSize;
uint8_t* cameraUniformBufferPtr;
VkDescriptorPool descriptorPool;
VkDescriptorSet descriptorSet;
VkImageMemoryBarrier2 acquireToRenderBarrier;
VkDependencyInfo acquireToRenderDependency;
VkImageMemoryBarrier2 renderToPresentBarrier;
VkDependencyInfo renderToPresentDependency;
double frameTime[16];
double frameTimeSum;
uint8_t frameTimeWriteHead;
uint8_t frameTimeEntryCount;
bool init( RenderDevice const& renderDevice );
void destroy( RenderDevice const& renderDevice );
};
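LightData now stores raw VkDeviceAddress values instead of descriptor bindings; presumably BufferManager::fetchDeviceAddress wraps the core call below (sketch only — `buffer` is any buffer created with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, and the bufferDeviceAddress feature plus the VMA allocator flag are enabled elsewhere in this change):

#include "VulkanHeader.h"

// Sketch: obtaining a device address so the shader can dereference the buffer as a pointer.
VkDeviceAddress FetchDeviceAddress( VkDevice const device, VkBuffer const buffer )
{
	VkBufferDeviceAddressInfo const addressInfo = {
		.sType  = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
		.pNext  = nullptr,
		.buffer = buffer,
	};
	return vkGetBufferDeviceAddress( device, &addressInfo );
}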

755
Blaze/ModelLoader.cpp Normal file
View File

@ -0,0 +1,755 @@
#include "ModelLoader.h"
#include <algorithm>
#include <array>
#include <memory_resource>
#include <string_view>
#include <DirectXMath.h>
#include <SDL3/SDL_log.h>
#include <cgltf.h>
#include <stb_image.h>
#include "EntityManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "MacroUtils.h"
#include "MathUtil.h"
// TODO: Cache materials while loading.
uint32_t ProcessMaterial( RenderDevice* renderDevice, Model* model, cgltf_material const& material )
{
ASSERT( material.has_pbr_metallic_roughness );
DirectX::XMFLOAT4 const baseColorFactor = DirectX::XMFLOAT4{ material.pbr_metallic_roughness.base_color_factor };
VkSampler sampler = nullptr;
TextureID baseColorTexture;
if ( material.pbr_metallic_roughness.base_color_texture.texture )
{
cgltf_image* baseColorImage = material.pbr_metallic_roughness.base_color_texture.texture->image;
{
byte* data;
if ( baseColorImage->buffer_view->data )
{
data = static_cast<byte*>( baseColorImage->buffer_view->data );
}
else
{
data = static_cast<byte*>( baseColorImage->buffer_view->buffer->data ) + baseColorImage->buffer_view->offset;
}
size_t size = baseColorImage->buffer_view->size;
uint32_t width;
uint32_t height;
uint32_t numChannels = 4;
stbi_uc* textureData;
{
int w;
int h;
int nc;
int requestedChannels = static_cast<int>( numChannels );
textureData = stbi_load_from_memory(
reinterpret_cast<stbi_uc const*>( data ), static_cast<int>( size ), &w, &h, &nc, requestedChannels );
ASSERT( nc <= requestedChannels );
if ( not textureData )
{
return UINT32_MAX;
}
width = static_cast<uint32_t>( w );
height = static_cast<uint32_t>( h );
}
VkSamplerCreateInfo constexpr samplerCreateInfo = {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.magFilter = VK_FILTER_LINEAR,
.minFilter = VK_FILTER_LINEAR,
.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.mipLodBias = 0.0,
.anisotropyEnable = true,
.maxAnisotropy = 1.0f,
.compareEnable = false,
.compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = VK_LOD_CLAMP_NONE,
.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
.unnormalizedCoordinates = false,
};
VK_CHECK( vkCreateSampler( renderDevice->device, &samplerCreateInfo, nullptr, &sampler ) );
auto textureOpt = renderDevice->textureManager->createTexture( { width, height, 1 }, sampler );
if ( not textureOpt )
{
return UINT32_MAX;
}
baseColorTexture = std::move( textureOpt.value() );
VkImage textureImage = renderDevice->textureManager->fetchImage( baseColorTexture ).value();
// Staging Buffer Create
VkBuffer stagingBuffer;
VmaAllocation stagingAllocation;
{
VkBufferCreateInfo const stagingBufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = static_cast<VkDeviceSize>( width ) * height * numChannels * sizeof( textureData[0] ),
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr stagingAllocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VK_CHECK( vmaCreateBuffer(
renderDevice->gpuAllocator,
&stagingBufferCreateInfo,
&stagingAllocationCreateInfo,
&stagingBuffer,
&stagingAllocation,
&allocationInfo ) );
if ( allocationInfo.pMappedData )
{
memcpy( allocationInfo.pMappedData, textureData, stagingBufferCreateInfo.size );
}
}
// All data has been copied to stagingBuffer; the CPU copy is no longer needed.

stbi_image_free( textureData );
// Staging -> Texture transfer
{
Frame& frameInUse = renderDevice->frames[renderDevice->frameIndex];
// This should just pass.
VK_CHECK( vkWaitForFences( renderDevice->device, 1, &frameInUse.frameReadyToReuse, VK_TRUE, INT64_MAX ) );
// Reset Frame
VK_CHECK( vkResetFences( renderDevice->device, 1, &frameInUse.frameReadyToReuse ) );
VK_CHECK( vkResetCommandPool( renderDevice->device, frameInUse.commandPool, 0 ) );
VkCommandBufferBeginInfo constexpr beginInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
.pInheritanceInfo = nullptr,
};
uint32_t mipLevels = TextureManager::calculateRequiredMipLevels( width, height, 1 );
VkImageSubresourceRange const subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageMemoryBarrier2 const creationToTransferImageBarrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT,
.srcAccessMask = VK_ACCESS_2_NONE,
.dstStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = renderDevice->textureManager->fetchImage( baseColorTexture ).value(),
.subresourceRange = subresourceRange,
};
VkDependencyInfo const creationToTransferDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &creationToTransferImageBarrier,
};
std::array transferToReadyImageBarriers{
// transferToReadyImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipLevels-1,
.baseArrayLayer = 0,
.layerCount = 1,
},
},
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = mipLevels-1,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
}
};
VkDependencyInfo const transferToReadyDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = static_cast<uint32_t>( transferToReadyImageBarriers.size() ),
.pImageMemoryBarriers = transferToReadyImageBarriers.data(),
};
VkImageSubresourceRange const mipLevelSubresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
std::array prepareNextMipLevelBarriers{
// prepareNextMipLevelSrcImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = mipLevelSubresource,
},
// prepareNextMipLevelDstImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = mipLevelSubresource,
}
};
VkDependencyInfo const prepareNextMipLevelDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = static_cast<uint32_t>( prepareNextMipLevelBarriers.size() ),
.pImageMemoryBarriers = prepareNextMipLevelBarriers.data(),
};
vkBeginCommandBuffer( frameInUse.commandBuffer, &beginInfo );
{
VkImageSubresourceLayers imageSubresourceLayers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
// TODO: Ensure `bufferRowLength` and `bufferImageHeight` are not required.
VkBufferImageCopy copyRegion = {
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = imageSubresourceLayers,
.imageOffset = { 0, 0, 0 },
.imageExtent = { width, height, 1 }
};
// Start
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &creationToTransferDependency );
// Staging -> Image L0
vkCmdCopyBufferToImage(
frameInUse.commandBuffer,
stagingBuffer,
textureImage,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
&copyRegion );
prepareNextMipLevelBarriers[0].subresourceRange.baseMipLevel = 0;
prepareNextMipLevelBarriers[1].subresourceRange.baseMipLevel = 1;
int32_t mipSrcWidth = static_cast<int32_t>( width );
int32_t mipSrcHeight = static_cast<int32_t>( height );
int32_t mipDstWidth = std::max( mipSrcWidth / 2, 1 );
int32_t mipDstHeight = std::max( mipSrcHeight / 2, 1 );
VkImageSubresourceLayers constexpr mipSubresourceLayers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageBlit2 imageBlit = {
.sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
.pNext = nullptr,
.srcSubresource = mipSubresourceLayers,
.srcOffsets = { { 0, 0, 0 }, { mipSrcWidth, mipSrcHeight, 1 } },
.dstSubresource = mipSubresourceLayers,
.dstOffsets = { { 0, 0, 0 }, { mipDstWidth, mipDstHeight, 1 } },
};
imageBlit.srcSubresource.mipLevel = 0;
imageBlit.dstSubresource.mipLevel = 1;
imageBlit.srcOffsets[1].x = mipSrcWidth;
imageBlit.srcOffsets[1].y = mipSrcHeight;
imageBlit.dstOffsets[1].x = mipDstWidth;
imageBlit.dstOffsets[1].y = mipDstHeight;
VkBlitImageInfo2 blitInfo = {
.sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
.pNext = nullptr,
.srcImage = textureImage,
.srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.dstImage = textureImage,
.dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.regionCount = 1,
.pRegions = &imageBlit,
.filter = VK_FILTER_LINEAR,
};
// MipMapping
for ( uint32_t dstMipLevel = 1; dstMipLevel < mipLevels; ++dstMipLevel )
{
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &prepareNextMipLevelDependency );
vkCmdBlitImage2( frameInUse.commandBuffer, &blitInfo );
// Prep for NEXT iteration
mipSrcWidth = mipDstWidth;
mipSrcHeight = mipDstHeight;
mipDstWidth = std::max( mipSrcWidth / 2, 1 );
mipDstHeight = std::max( mipSrcHeight / 2, 1 );
imageBlit.srcSubresource.mipLevel = dstMipLevel;
imageBlit.dstSubresource.mipLevel = dstMipLevel + 1;
imageBlit.srcOffsets[1].x = mipSrcWidth;
imageBlit.srcOffsets[1].y = mipSrcHeight;
imageBlit.dstOffsets[1].x = mipDstWidth;
imageBlit.dstOffsets[1].y = mipDstHeight;
// Prep current mip level as source
prepareNextMipLevelBarriers[0].subresourceRange.baseMipLevel = dstMipLevel;
prepareNextMipLevelBarriers[1].subresourceRange.baseMipLevel = dstMipLevel + 1;
}
// End
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &transferToReadyDependency );
}
vkEndCommandBuffer( frameInUse.commandBuffer );
VkSubmitInfo submitInfo = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr,
.waitSemaphoreCount = 0,
.pWaitSemaphores = nullptr,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &frameInUse.commandBuffer,
.signalSemaphoreCount = 0,
.pSignalSemaphores = nullptr,
};
VK_CHECK( vkQueueSubmit( renderDevice->directQueue, 1, &submitInfo, frameInUse.frameReadyToReuse ) );
// Do not reset this fence; otherwise the frame will never become available to the main loop.
VK_CHECK( vkWaitForFences( renderDevice->device, 1, &frameInUse.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
renderDevice->frameIndex = ( renderDevice->frameIndex + 1 ) % renderDevice->getNumFrames();
}
vmaDestroyBuffer( renderDevice->gpuAllocator, stagingBuffer, stagingAllocation );
}
}
float const metallic = material.pbr_metallic_roughness.metallic_factor;
float const roughness = material.pbr_metallic_roughness.roughness_factor;
uint32_t const materialIdx = static_cast<uint32_t>( model->materials.size() );
model->materials.push_back( { sampler, std::move( baseColorTexture ), {}, roughness, metallic, baseColorFactor } );
return materialIdx;
}
ModelMesh ProcessMesh(
RenderDevice* renderDevice,
Model* model,
std::pmr::vector<Vertex>* pVertices,
std::pmr::vector<uint32_t>* pIndices,
cgltf_mesh const& mesh )
{
using namespace std::string_view_literals;
uint32_t const primitiveStart = static_cast<uint32_t>( model->primitives.size() );
uint32_t const primitiveCount = static_cast<uint32_t>( mesh.primitives_count );
cgltf_primitive const* primitives = mesh.primitives;
for ( uint32_t primitiveIndex = 0; primitiveIndex < mesh.primitives_count; ++primitiveIndex )
{
// VertexStart is per-primitive
int32_t const vertexStart = static_cast<int32_t>( pVertices->size() );
cgltf_primitive const& primitive = primitives[primitiveIndex];
ASSERT( primitive.type == cgltf_primitive_type_triangles );
// Index Buffer
size_t const indexStart = pIndices->size();
size_t const indexCount = cgltf_accessor_unpack_indices( primitive.indices, nullptr, sizeof pIndices->at( 0 ), 0 );
ASSERT( indexCount > 0 );
pIndices->resize( indexStart + indexCount );
cgltf_accessor_unpack_indices(
primitive.indices, pIndices->data() + indexStart, sizeof pIndices->at( 0 ), indexCount );
// Material
uint32_t materialIdx = UINT32_MAX;
if ( primitive.material )
{
materialIdx = ProcessMaterial( renderDevice, model, *primitive.material );
}
model->primitives.push_back( Primitive{
.indexStart = static_cast<uint32_t>( indexStart ),
.indexCount = static_cast<uint32_t>( indexCount ),
.material = materialIdx,
.vertexOffset = vertexStart,
} );
cgltf_attribute const* attributes = primitive.attributes;
for ( uint32_t attribIndex = 0; attribIndex < primitive.attributes_count; ++attribIndex )
{
if ( "POSITION"sv == attributes[attribIndex].name )
{
cgltf_attribute const& positionAttr = attributes[attribIndex];
ASSERT( positionAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( positionAttr.data->type == cgltf_type_vec3 );
std::pmr::vector<DirectX::XMFLOAT3> positions{ pVertices->get_allocator() };
size_t const floatCount = cgltf_accessor_unpack_floats( positionAttr.data, nullptr, 0 );
positions.resize( floatCount / 3 );
cgltf_accessor_unpack_floats(
positionAttr.data, reinterpret_cast<cgltf_float*>( positions.data() ), floatCount );
// Guaranteed to have space for these vertices.
pVertices->resize( vertexStart + positions.size() );
auto vertexIter = pVertices->begin() + vertexStart;
for ( DirectX::XMFLOAT3 const& position : positions )
{
vertexIter->position = position;
++vertexIter;
}
}
if ( "NORMAL"sv == attributes[attribIndex].name )
{
cgltf_attribute const& normalAttr = attributes[attribIndex];
ASSERT( normalAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( normalAttr.data->type == cgltf_type_vec3 );
std::pmr::vector<DirectX::XMFLOAT3> normals{ pVertices->get_allocator() };
size_t const floatCount = cgltf_accessor_unpack_floats( normalAttr.data, nullptr, 0 );
normals.resize( floatCount / 3 );
cgltf_accessor_unpack_floats( normalAttr.data, reinterpret_cast<cgltf_float*>( normals.data() ), floatCount );
// Guaranteed to have space for these vertices.
pVertices->resize( vertexStart + normals.size() );
auto vertexIter = pVertices->begin() + vertexStart;
for ( DirectX::XMFLOAT3 const& normal : normals )
{
vertexIter->normal = normal;
++vertexIter;
}
}
if ( "TEXCOORD_0"sv == attributes[attribIndex].name )
{
cgltf_attribute const& texCoordAttr = attributes[attribIndex];
ASSERT( texCoordAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( texCoordAttr.data->type == cgltf_type_vec2 );
std::pmr::vector<DirectX::XMFLOAT2> texCoords{ pVertices->get_allocator() };
size_t const floatCount = cgltf_accessor_unpack_floats( texCoordAttr.data, nullptr, 0 );
texCoords.resize( floatCount / 2 );
cgltf_accessor_unpack_floats(
texCoordAttr.data, reinterpret_cast<cgltf_float*>( texCoords.data() ), floatCount );
// Guaranteed to have space for these vertices.
pVertices->resize( vertexStart + texCoords.size() );
auto vertexIter = pVertices->begin() + vertexStart;
for ( DirectX::XMFLOAT2 const& texCoord : texCoords )
{
vertexIter->texCoord0 = texCoord;
++vertexIter;
}
}
if ( "TEXCOORD_1"sv == attributes[attribIndex].name )
{
cgltf_attribute const& texCoordAttr = attributes[attribIndex];
ASSERT( texCoordAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( texCoordAttr.data->type == cgltf_type_vec2 );
std::pmr::vector<DirectX::XMFLOAT2> texCoords{ pVertices->get_allocator() };
size_t const floatCount = cgltf_accessor_unpack_floats( texCoordAttr.data, nullptr, 0 );
texCoords.resize( floatCount / 2 );
cgltf_accessor_unpack_floats(
texCoordAttr.data, reinterpret_cast<cgltf_float*>( texCoords.data() ), floatCount );
// Guaranteed to have space for these vertices.
pVertices->resize( vertexStart + texCoords.size() );
auto vertexIter = pVertices->begin() + vertexStart;
for ( DirectX::XMFLOAT2 const& texCoord : texCoords )
{
vertexIter->texCoord1 = texCoord;
++vertexIter;
}
}
if ( "COLOR_0"sv == attributes[attribIndex].name )
{
cgltf_attribute const& colorAttr = attributes[attribIndex];
ASSERT( colorAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( colorAttr.data->type == cgltf_type_vec3 or colorAttr.data->type == cgltf_type_vec4 );
if ( colorAttr.data->type == cgltf_type_vec3 )
{
std::pmr::vector<DirectX::XMFLOAT3> colors{ pVertices->get_allocator() };
size_t const floatCount = cgltf_accessor_unpack_floats( colorAttr.data, nullptr, 0 );
colors.resize( floatCount / 3 );
cgltf_accessor_unpack_floats( colorAttr.data, reinterpret_cast<cgltf_float*>( colors.data() ), floatCount );
// Guaranteed to have space for these vertices.
pVertices->resize( vertexStart + colors.size() );
auto vertexIter = pVertices->begin() + vertexStart;
for ( DirectX::XMFLOAT3 const& color : colors )
{
vertexIter->color0 = { color.x, color.y, color.z, 1.0f };
++vertexIter;
}
}
else // Since only two options
{
std::pmr::vector<DirectX::XMFLOAT4> colors{ pVertices->get_allocator() };
size_t const floatCount = cgltf_accessor_unpack_floats( colorAttr.data, nullptr, 0 );
colors.resize( floatCount / 4 );
cgltf_accessor_unpack_floats( colorAttr.data, reinterpret_cast<cgltf_float*>( colors.data() ), floatCount );
// Guaranteed to have space for these vertices.
pVertices->resize( vertexStart + colors.size() );
auto vertexIter = pVertices->begin() + vertexStart;
for ( DirectX::XMFLOAT4 const& color : colors )
{
vertexIter->color0 = color;
++vertexIter;
}
}
}
// TODO: Grab other attributes.
}
}
return { primitiveStart, primitiveCount };
}
Entity* ProcessNode(
RenderDevice* renderDevice,
EntityManager* entityManager,
Model* model,
std::pmr::vector<Vertex>* vertices,
std::pmr::vector<uint32_t>* indices,
cgltf_node const& node )
{
DirectX::XMVECTOR vTranslation;
DirectX::XMVECTOR qRotation;
DirectX::XMVECTOR vScale;
if ( node.has_matrix )
{
DirectX::XMMATRIX const mat = DirectX::XMMATRIX{ node.matrix };
ASSERT( DirectX::XMMatrixDecompose( &vScale, &qRotation, &vTranslation, mat ) );
}
else
{
vTranslation = node.has_translation
? DirectX::XMVectorSet( node.translation[0], node.translation[1], node.translation[2], 1.0f )
: DirectX::XMVectorZero();
qRotation = node.has_rotation
? DirectX::XMVectorSet( node.rotation[0], node.rotation[1], node.rotation[2], node.rotation[3] )
: DirectX::XMQuaternionIdentity();
vScale = node.has_scale ? DirectX::XMVectorSet( node.scale[0], node.scale[1], node.scale[2], 1.0f )
: DirectX::XMVectorSplatOne();
}
auto tx = Transform{
.translation = vTranslation,
.rotation = qRotation,
.scale = vScale,
};
Entity* entity = entityManager->createEntity( tx );
if ( node.mesh )
{
entity->modelMesh = ProcessMesh( renderDevice, model, vertices, indices, *node.mesh );
}
for ( uint32_t childIdx = 0; childIdx < node.children_count; ++childIdx )
{
entity->addChild( ProcessNode( renderDevice, entityManager, model, vertices, indices, *node.children[childIdx] ) );
}
return entity;
}
Entity* LoadModel( RenderDevice* renderDevice, EntityManager* entityManager, const char* filename )
{
cgltf_data* gltfModel = nullptr;
cgltf_options options = {};
cgltf_result result = cgltf_parse_file( &options, filename, &gltfModel );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s failed to load", filename );
cgltf_free( gltfModel );
return nullptr;
}
result = cgltf_validate( gltfModel );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s is invalid.", filename );
cgltf_free( gltfModel );
return nullptr;
}
result = cgltf_load_buffers( &options, gltfModel, filename );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s buffers failed to load.", filename );
cgltf_free( gltfModel );
return nullptr;
}
Entity* entity = entityManager->createEntity( {
.translation = DirectX::XMVectorZero(),
.rotation = DirectX::XMQuaternionIdentity(),
.scale = DirectX::XMVectorSplatOne(),
} );
// Output data
std::pmr::vector<Vertex> vertices;
std::pmr::vector<uint32_t> indices;
cgltf_scene const* currentScene = gltfModel->scene;
for ( uint32_t nodeIdx = 0; nodeIdx < currentScene->nodes_count; ++nodeIdx )
{
entity->addChild( ProcessNode(
renderDevice, entityManager, &entity->model, &vertices, &indices, *currentScene->nodes[nodeIdx] ) );
}
auto vertexBuffer = renderDevice->bufferManager->createVertexBuffer( vertices.size() * sizeof vertices[0] );
if ( not vertexBuffer )
{
return nullptr;
}
entity->model.vertexBuffer = std::move( vertexBuffer.value() );
renderDevice->bufferManager->writeToBuffer( entity->model.vertexBuffer, vertices );
auto indexBuffer = renderDevice->bufferManager->createIndexBuffer( indices.size() * sizeof indices[0] );
if ( not indexBuffer )
{
return nullptr;
}
entity->model.indexBuffer = std::move( indexBuffer.value() );
renderDevice->bufferManager->writeToBuffer( entity->model.indexBuffer, std::span{ indices } );
cgltf_free( gltfModel );
return entity;
}
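A minimal usage sketch (the function name and asset path are hypothetical, not taken from this change):

#include <SDL3/SDL_log.h>
#include "ModelLoader.h"

// Sketch: loading a glTF asset at startup and handling failure.
Entity* LoadStartupScene( RenderDevice* renderDevice, EntityManager* entityManager )
{
	Entity* root = LoadModel( renderDevice, entityManager, "Assets/Models/model.glb" );
	if ( root == nullptr )
	{
		SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "Startup model failed to load." );
	}
	return root;
}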

82
Blaze/ModelLoader.h Normal file
View File

@ -0,0 +1,82 @@
#pragma once
#include <DirectXMath.h>
#include <memory_resource>
#include <vector>
#include "BufferManager.h"
#include "TextureManager.h"
struct RenderDevice;
struct EntityManager;
struct Entity;
struct GlobalMemory;
struct Vertex
{
DirectX::XMFLOAT3 position = { 0.0f, 0.0f, 0.0f };
DirectX::XMFLOAT3 normal = { 1.0f, 1.0f, 1.0f };
DirectX::XMFLOAT2 texCoord0 = { 0.0f, 0.0f };
DirectX::XMFLOAT2 texCoord1 = { 0.0f, 0.0f };
DirectX::XMFLOAT4 color0 = { 1.0f, 1.0f, 1.0f, 1.0f };
};
struct Primitive
{
uint32_t indexStart;
uint32_t indexCount;
uint32_t material;
int32_t vertexOffset;
};
struct ModelMesh
{
uint32_t primitiveStart = 0;
uint32_t primitiveCount = 0;
[[nodiscard]] bool isNull() const
{
return primitiveCount == 0;
}
};
struct Material
{
constexpr static size_t GPU_DATA_OFFSET = sizeof( VkSampler );
constexpr static size_t GPU_DATA_SIZE =
sizeof( TextureID ) + sizeof( uint32_t ) + 2 * sizeof( float ) + sizeof( DirectX::XMFLOAT4 );
VkSampler sampler; // TODO: Reuse
// To copy directly.
TextureID texture;
uint32_t padding0; // FIXME: Wasting space.
float roughness = 1.0f;
float metallic = 1.0f;
DirectX::XMFLOAT4 baseColor = { 1.0f, 1.0f, 1.0f, 1.0f };
[[nodiscard]] bool isNull() const
{
return texture.isNull() or not sampler;
}
};
static_assert( sizeof( Material ) == Material::GPU_DATA_OFFSET + Material::GPU_DATA_SIZE );
static constexpr Material DEFAULT_MATERIAL = {};
struct Model
{
std::pmr::monotonic_buffer_resource mem;
BufferID vertexBuffer;
BufferID indexBuffer;
std::pmr::vector<Material> materials;
std::pmr::vector<Primitive> primitives;
[[nodiscard]] bool isNull() const
{
return vertexBuffer.isNull();
}
};
Entity* LoadModel( RenderDevice* renderDevice, EntityManager* entityManager, const char* filename );

0
Blaze/RID.cpp Normal file
View File

55
Blaze/RID.h Normal file
View File

@ -0,0 +1,55 @@
#pragma once
#include <cstdint>
template <typename T>
struct RID
{
private:
uint32_t m_index = 0;
explicit RID( uint32_t const index ) : m_index{ index }
{}
public:
RID() = default;
// No copy
RID( RID const& ) = delete;
RID& operator=( RID const& ) = delete;
// Move allowed
RID( RID&& other ) noexcept;
RID& operator=( RID&& other ) noexcept;
[[nodiscard]] bool isNull() const
{
return m_index == 0;
}
static RID null()
{
return {};
}
operator bool() const
{
return m_index != 0;
}
};
template <typename T>
RID<T>::RID( RID&& other ) noexcept : m_index{ other.m_index }
{
other.m_index = 0;
}
template <typename T>
RID<T>& RID<T>::operator=( RID&& other ) noexcept
{
if ( this == &other ) return *this;
m_index = other.m_index;
other.m_index = 0;
return *this;
}
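The 32-bit id packs a slot index in the low bits and a generation in the high bits; the exact split is defined by INDEX_MASK and GENERATION_OFFSET in TextureManager and is not shown in this diff, so the 19-bit index below is an assumption used only to illustrate why a stale id fails isValidID():

#include <cstdint>

// Sketch only: the real constants live in TextureManager; 19 index bits are assumed here.
constexpr uint32_t kIndexMask        = 0x0007FFFF;
constexpr uint32_t kGenerationOffset = 19;

constexpr uint32_t PackId( uint32_t const index, uint32_t const generation )
{
	return ( index & kIndexMask ) | ( generation << kGenerationOffset );
}

constexpr uint32_t SlotIndex( uint32_t const id )
{
	return id & kIndexMask;
}

// Freeing a slot bumps its generation, so an old id no longer matches the slot's stored index.
static_assert( SlotIndex( PackId( 7, 3 ) ) == 7 );
static_assert( PackId( 7, 3 ) != PackId( 7, 4 ) );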

View File

@ -8,6 +8,7 @@
#include <optional>
#include <span>
#include "BufferManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "MathUtil.h"
@ -171,13 +172,29 @@ RenderDevice* RenderDevice_Create( GlobalMemory* mem, RenderDevice::CreateInfo c
.pQueuePriorities = &priority,
};
VkPhysicalDeviceVulkan13Features constexpr features13 = {
VkPhysicalDeviceVulkan13Features features13 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,
.pNext = nullptr,
.synchronization2 = true,
.dynamicRendering = true,
};
VkPhysicalDeviceVulkan12Features const features12 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
.pNext = &features13,
.descriptorIndexing = true,
.shaderSampledImageArrayNonUniformIndexing = true,
.shaderStorageImageArrayNonUniformIndexing = true,
.descriptorBindingUniformBufferUpdateAfterBind = true,
.descriptorBindingSampledImageUpdateAfterBind = true,
.descriptorBindingStorageImageUpdateAfterBind = true,
.descriptorBindingUpdateUnusedWhilePending = true,
.descriptorBindingPartiallyBound = true,
.descriptorBindingVariableDescriptorCount = true,
.runtimeDescriptorArray = true,
.bufferDeviceAddress = true,
};
VkPhysicalDeviceFeatures features = {
.depthClamp = true,
.samplerAnisotropy = true,
@ -187,7 +204,7 @@ RenderDevice* RenderDevice_Create( GlobalMemory* mem, RenderDevice::CreateInfo c
VkDeviceCreateInfo const deviceCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
.pNext = &features13,
.pNext = &features12,
.flags = 0,
.queueCreateInfoCount = 1,
.pQueueCreateInfos = &queueCreateInfo,
@ -202,7 +219,7 @@ RenderDevice* RenderDevice_Create( GlobalMemory* mem, RenderDevice::CreateInfo c
volkLoadDevice( device );
VmaAllocatorCreateInfo allocatorCreateInfo = {
.flags = 0,
.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT,
.physicalDevice = physicalDeviceInUse,
.device = device,
.preferredLargeHeapBlockSize = 0,
@ -374,7 +391,7 @@ RenderDevice* RenderDevice_Create( GlobalMemory* mem, RenderDevice::CreateInfo c
Frame* frames = reinterpret_cast<Frame*>( mem->allocate( sizeof( Frame ) * swapchainImageCount ) );
for ( uint32_t i = 0; i != swapchainImageCount; ++i )
{
Frame_Create( frames + i, device, directQueueFamilyIndex.value() );
Frame_Create( frames + i, device, gpuAllocator, directQueueFamilyIndex.value(), swapchainExtent );
}
std::byte* allocation = mem->allocate( sizeof( RenderDevice ), alignof( RenderDevice ) );
@ -409,6 +426,18 @@ RenderDevice* RenderDevice_Create( GlobalMemory* mem, RenderDevice::CreateInfo c
ASSERT( renderDevice->textureManager );
BufferManager* bufferManager = BufferManager_Create( mem, renderDevice, 10000 );
if ( !bufferManager )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "BufferManager failed to init" );
renderDevice->destroy();
return nullptr;
}
renderDevice->bufferManager = bufferManager;
ASSERT( renderDevice->bufferManager );
return renderDevice;
}
@ -421,6 +450,7 @@ void RenderDevice::destroy()
{
if ( not isInit() ) return;
Take( bufferManager )->destroy();
Take( textureManager )->destroy();
for ( Frame& frame : std::span{ Take( frames ), swapchainImageCount } )

View File

@ -1,14 +1,12 @@
#pragma once
#include <volk.h>
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0
#include <vma/vk_mem_alloc.h>
#include <SDL3/SDL_video.h>
#include <SDL3/SDL_vulkan.h>
#include "VulkanHeader.h"
struct BufferManager;
struct GlobalMemory;
struct Frame;
struct TextureManager;
@ -45,6 +43,7 @@ struct RenderDevice
uint32_t frameIndex = 0;
TextureManager* textureManager;
BufferManager* bufferManager;
[[nodiscard]] bool isInit() const;
void destroy();

View File

@ -1,10 +1,13 @@
#include "TextureManager.h"
#include "FreeList.h"
#include "GlobalMemory.h"
#include "RenderDevice.h"
std::optional<TextureID> TextureManager::createTexture( VkExtent3D const extent )
template struct RID<Texture>;
std::optional<TextureID> TextureManager::createTexture( VkExtent3D const extent, VkSampler sampler )
{
if ( m_freeList.empty() )
{
@ -12,6 +15,7 @@ std::optional<TextureID> TextureManager::createTexture( VkExtent3D const extent
}
Texture* textureSlot = reinterpret_cast<Texture*>( m_freeList.popFront() );
++m_count;
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
@ -95,13 +99,37 @@ std::optional<TextureID> TextureManager::createTexture( VkExtent3D const extent
.index = index,
};
uint32_t const innerIndex = index & INDEX_MASK;
// TODO: Batch all writes.
VkDescriptorImageInfo const descriptorImageInfo = {
.sampler = sampler,
.imageView = textureView,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkWriteDescriptorSet const descriptorWrite = {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = m_descriptorSet,
.dstBinding = 0,
.dstArrayElement = innerIndex,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &descriptorImageInfo,
.pBufferInfo = nullptr,
.pTexelBufferView = nullptr,
};
vkUpdateDescriptorSets( renderDevice.device, 1, &descriptorWrite, 0, nullptr );
// NOTE: Memory hackery to create TextureID;
return *reinterpret_cast<TextureID*>( &index );
return std::move( *reinterpret_cast<TextureID*>( &index ) );
}
bool TextureManager::isValidID( TextureID rid ) const
bool TextureManager::isValidID( TextureID const& rid ) const
{
uint32_t const index = *reinterpret_cast<uint32_t*>( &rid );
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const innerIndex = index & INDEX_MASK;
if ( innerIndex > m_capacity ) return false;
@ -109,29 +137,41 @@ bool TextureManager::isValidID( TextureID rid ) const
return m_aTextures[innerIndex].index == index;
}
void TextureManager::freeTexture( TextureID const rid )
void TextureManager::freeTexture( TextureID&& rid )
{
if ( not isValidID( rid ) ) return;
Texture& texture = fetchTextureUnchecked( rid );
destroyTexture( texture );
auto _ = std::move( rid );
}
std::optional<VkImage> TextureManager::fetchImage( TextureID const rid )
std::optional<VkImage> TextureManager::fetchImage( TextureID const& rid )
{
if ( not isValidID( rid ) ) return std::nullopt;
return fetchTextureUnchecked( rid ).image;
}
std::optional<VkImageView> TextureManager::fetchImageView( TextureID const rid )
std::optional<VkImageView> TextureManager::fetchImageView( TextureID const& rid )
{
if ( not isValidID( rid ) ) return std::nullopt;
return fetchTextureUnchecked( rid ).view;
}
VkDescriptorSetLayout const& TextureManager::descriptorLayout() const
{
return m_descriptorSetLayout;
}
VkDescriptorSet const& TextureManager::descriptorSet() const
{
return m_descriptorSet;
}
void TextureManager::destroy()
{
#if defined( _DEBUG )
@ -141,6 +181,9 @@ void TextureManager::destroy()
}
#endif
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
while ( not m_freeList.empty() )
{
Texture* tex = reinterpret_cast<Texture*>( m_freeList.popFront() );
@ -151,7 +194,12 @@ void TextureManager::destroy()
{
destroyTexture( tex );
}
m_descriptorSet = nullptr;
vkDestroyDescriptorPool( renderDevice.device, Take( m_descriptorPool ), nullptr );
vkDestroyDescriptorSetLayout( renderDevice.device, Take( m_descriptorSetLayout ), nullptr );
}
TextureManager::~TextureManager()
{
ASSERT( not m_aTextures );
@ -182,6 +230,7 @@ void TextureManager::destroyTexture( Texture& tex )
ASSERT( tex.index > index and "Generation should increase." );
m_freeList.pushBack( reinterpret_cast<FreeList::Node*>( &tex ) );
--m_count;
}
uint32_t TextureManager::calculateRequiredMipLevels( uint32_t const w, uint32_t const h, uint32_t const d )
@ -190,34 +239,106 @@ uint32_t TextureManager::calculateRequiredMipLevels( uint32_t const w, uint32_t
return 1 + static_cast<uint32_t>( floorf( log2f( static_cast<float>( maxDim ) ) ) );
}
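For example, a 1024x768 image has maxDim = 1024, giving 1 + floor(log2(1024)) = 11 mip levels (1024, 512, ..., 1). A standalone check of that arithmetic (sketch, not project code):

#include <cassert>
#include <cmath>
#include <cstdint>

int main()
{
	uint32_t const maxDim = 1024; // max of 1024 x 768 x 1
	uint32_t const levels = 1 + static_cast<uint32_t>( floorf( log2f( static_cast<float>( maxDim ) ) ) );
	assert( levels == 11 ); // 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1
	return 0;
}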
Texture& TextureManager::fetchTextureUnchecked( TextureID rid )
Texture& TextureManager::fetchTextureUnchecked( TextureID const& rid )
{
uint32_t const index = *reinterpret_cast<uint32_t*>( &rid );
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const innerIndex = index & INDEX_MASK;
return m_aTextures[innerIndex];
}
TextureManager::TextureManager( RenderDevice* pRenderDevice, Texture* aTextures, uint32_t const capacity )
: m_pRenderDevice{ pRenderDevice }, m_aTextures{ aTextures }, m_count{ 0 }, m_capacity{ capacity }
TextureManager::TextureManager(
RenderDevice* pRenderDevice,
Texture* aTextures,
uint32_t const capacity,
VkDescriptorSetLayout const setLayout,
VkDescriptorPool const pool,
VkDescriptorSet const descriptorSet )
: m_pRenderDevice{ pRenderDevice }
, m_aTextures{ aTextures }
, m_count{ 0 }
, m_capacity{ capacity }
, m_descriptorSetLayout{ setLayout }
, m_descriptorPool{ pool }
, m_descriptorSet{ descriptorSet }
{
uint32_t i = 0;
for ( Texture& tex : std::span{ m_aTextures, m_capacity } )
{
// Default Generation is 1
// TODO: Fix this by creating 0,0 as a valid texture.
tex.index = i++ | ( 1 << GENERATION_OFFSET );
m_freeList.pushFront( reinterpret_cast<FreeList::Node*>( &tex ) );
}
}
TextureManager* TextureManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t maxCount )
TextureManager* TextureManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t const maxCount )
{
Texture* textures = reinterpret_cast<Texture*>( mem->allocate( maxCount * sizeof( Texture ), alignof( Texture ) ) );
if ( not textures ) return nullptr;
VkDescriptorSetLayoutBinding const descriptorSetLayoutBinding{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = maxCount,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.pImmutableSamplers = nullptr,
};
VkDescriptorBindingFlags flags = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT |
VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT |
VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT;
VkDescriptorSetLayoutBindingFlagsCreateInfo const bindlessBinding = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
.pNext = nullptr,
.bindingCount = 1,
.pBindingFlags = &flags,
};
VkDescriptorSetLayoutCreateInfo const descriptorSetLayoutCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = &bindlessBinding,
.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT,
.bindingCount = 1,
.pBindings = &descriptorSetLayoutBinding,
};
VkDescriptorSetLayout descriptorSetLayout;
VK_CHECK( vkCreateDescriptorSetLayout(
renderDevice->device, &descriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout ) );
VkDescriptorPoolSize const poolSize = {
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = maxCount,
};
VkDescriptorPoolCreateInfo const descriptorPoolCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT,
.maxSets = 1,
.poolSizeCount = 1,
.pPoolSizes = &poolSize,
};
VkDescriptorPool descriptorPool;
VK_CHECK( vkCreateDescriptorPool( renderDevice->device, &descriptorPoolCreateInfo, nullptr, &descriptorPool ) );
VkDescriptorSetAllocateInfo const descriptorSetAllocateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
.descriptorPool = descriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &descriptorSetLayout,
};
VkDescriptorSet descriptorSet;
VK_CHECK( vkAllocateDescriptorSets( renderDevice->device, &descriptorSetAllocateInfo, &descriptorSet ) );
std::byte* allocation = mem->allocate( sizeof( TextureManager ), alignof( TextureManager ) );
if ( not allocation ) return nullptr;
return new ( allocation ) TextureManager{ renderDevice, textures, maxCount };
return new ( allocation )
TextureManager{ renderDevice, textures, maxCount, descriptorSetLayout, descriptorPool, descriptorSet };
}
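With the pipeline layout built from { bindless texture set, per-frame set } (see the MiscData changes above), the bindless set only needs to be bound once per recorded frame; a sketch of that bind — the function name and the `perFrameSet` parameter are assumptions:

#include <array>
#include "RenderDevice.h"
#include "TextureManager.h"

// Sketch: set 0 is the global bindless texture array, set 1 the per-frame data,
// matching the order of descriptorSetLayouts in MiscData::init.
void BindFrameDescriptors(
	RenderDevice const&    renderDevice,
	VkCommandBuffer const  commandBuffer,
	VkPipelineLayout const pipelineLayout,
	VkDescriptorSet const  perFrameSet )
{
	std::array const sets = {
		renderDevice.textureManager->descriptorSet(),
		perFrameSet,
	};
	vkCmdBindDescriptorSets(
		commandBuffer,
		VK_PIPELINE_BIND_POINT_GRAPHICS,
		pipelineLayout,
		0, // firstSet
		static_cast<uint32_t>( sets.size() ),
		sets.data(),
		0,
		nullptr );
}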

View File

@ -2,136 +2,16 @@
#include <optional>
#include <span>
#include <volk.h>
#include <vma/vk_mem_alloc.h>
#include "FreeList.h"
#include "MacroUtils.h"
#include "RID.h"
#include "RenderDevice.h"
#include "VulkanHeader.h"
struct GlobalMemory;
struct RenderDevice;
struct FreeListNode
{
FreeListNode* pNext;
FreeListNode* pPrev;
};
struct FreeList
{
using Node = FreeListNode;
struct Iterator
{
FreeListNode* pIter;
Iterator& operator++()
{
pIter = pIter->pNext;
return *this;
}
bool operator==( Iterator const& other ) const
{
return this->pIter == other.pIter;
}
FreeListNode& operator*()
{
return *pIter;
}
};
private:
FreeListNode m_head;
FreeListNode m_tail;
public:
FreeList() : m_head{ .pNext = &m_tail, .pPrev = nullptr }, m_tail{ .pNext = nullptr, .pPrev = &m_head }
{}
void pushBack( Node* pNode )
{
Node* prev = m_tail.pPrev;
// Set prev as previous of pNode
prev->pNext = pNode;
pNode->pPrev = prev;
// Set tail as next of pNode
pNode->pNext = &m_tail;
m_tail.pPrev = pNode;
}
void pushFront( Node* pNode )
{
Node* next = m_head.pNext;
// Set next as next of pNode
next->pPrev = pNode;
pNode->pNext = next;
// Set head as prev of pNode
pNode->pPrev = &m_head;
m_head.pNext = pNode;
}
Node* popFront()
{
ASSERT( not empty() );
Node* element = m_head.pNext;
element->pPrev->pNext = element->pNext;
element->pNext->pPrev = element->pPrev;
return element;
}
[[nodiscard]] bool empty() const
{
return m_head.pNext == &m_tail;
}
Iterator begin()
{
return { m_head.pNext };
}
Iterator end()
{
return { &m_tail };
}
FreeList( FreeList&& ) = delete;
FreeList( FreeList const& ) = delete;
FreeList& operator=( FreeList const& ) = delete;
FreeList& operator=( FreeList&& ) = delete;
~FreeList() = default;
};
template <typename T>
struct RID
{
private:
uint32_t m_index = 0;
explicit RID( uint32_t const index ) : m_index{ index } {};
public:
RID() = default;
static RID null()
{
return {};
}
operator bool() const
{
return m_index == 0;
}
};
struct Texture
{
VkImage image;
@ -142,10 +22,12 @@ struct Texture
uint32_t index;
};
static_assert( sizeof( Texture ) > sizeof( FreeListNode ) and "Texture is used intrusively by FreeList" );
static_assert( sizeof( Texture ) > sizeof( FreeList::Node ) and "Texture is used intrusively by FreeList" );
static_assert(
offsetof( Texture, index ) >= sizeof( FreeListNode ) and "Index should not be overwritten even in invalid state" );
offsetof( Texture, index ) >= sizeof( FreeList::Node ) and
"Index should not be overwritten even in invalid state" );
extern template struct RID<Texture>;
using TextureID = RID<Texture>;
struct TextureManager
@ -160,28 +42,50 @@ private:
RenderDevice* m_pRenderDevice;
Texture* m_aTextures;
uint32_t m_count;
uint32_t m_capacity;
FreeList m_freeList;
// Texture Manager
Texture* m_aTextures;
uint32_t m_count;
uint32_t m_capacity;
FreeList m_freeList;
void destroyTexture( Texture& tex );
// Bindless Descriptor Info
VkDescriptorSetLayout m_descriptorSetLayout;
VkDescriptorPool m_descriptorPool;
VkDescriptorSet m_descriptorSet;
Texture& fetchTextureUnchecked( TextureID rid );
void destroyTexture( Texture& tex );
Texture& fetchTextureUnchecked( TextureID const& rid );
public:
static uint32_t calculateRequiredMipLevels( uint32_t w, uint32_t h, uint32_t d );
static uint32_t calculateRequiredMipLevels( uint32_t w, uint32_t h, uint32_t d );
[[nodiscard]] bool isValidID( TextureID rid ) const;
[[nodiscard]] std::optional<TextureID> createTexture( VkExtent3D extent );
void freeTexture( TextureID rid );
[[nodiscard]] bool isValidID( TextureID const& rid ) const;
// [[nodiscard]] std::optional<TextureID> createTexture( VkExtent3D extent );
void freeTexture( TextureID&& rid );
DEPRECATE_JULY_2025
std::optional<VkImage> fetchImage( TextureID rid );
std::optional<VkImageView> fetchImageView( TextureID rid );
[[nodiscard]] std::optional<TextureID> createTexture( VkExtent3D extent, VkSampler sampler );
DEPRECATE_JULY_2025
std::optional<VkImage> fetchImage( TextureID const& rid );
DEPRECATE_JULY_2025
std::optional<VkImageView> fetchImageView( TextureID const& rid );
[[nodiscard]] VkDescriptorSetLayout const& descriptorLayout() const;
[[nodiscard]] VkDescriptorSet const& descriptorSet() const;
//
TextureManager( RenderDevice* pRenderDevice, Texture* aTextures, uint32_t capacity );
TextureManager(
RenderDevice* pRenderDevice,
Texture* aTextures,
uint32_t capacity,
VkDescriptorSetLayout setLayout,
VkDescriptorPool pool,
VkDescriptorSet descriptorSet );
void destroy();
TextureManager( TextureManager const& other ) = delete;

7
Blaze/VulkanHeader.h Normal file
View File

@ -0,0 +1,7 @@
#pragma once
#include <volk.h>
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0
#include <vma/vk_mem_alloc.h>

View File

@ -17,10 +17,10 @@
- [X] Create a Triangle
- [X] Create pipeline
- [X] Draw
- [ ] Create a Box
- [ ] Create Vertex buffer
- [ ] Load texture
- [ ] Draw
- [X] Create a Box
- [X] Create Vertex buffer
- [X] Load texture
- [X] Draw
- [ ] Refactor
## Features

View File

@ -4,6 +4,7 @@
"shader-slang",
"vulkan-memory-allocator",
"directxmath",
"cgltf",
"stb",
{
"name": "sdl3",