Compare commits

5 Commits

Author SHA1 Message Date
Anish Bhobe 74634a2a5e Integrated Directional Light. 2025-07-02 15:54:11 +02:00
Anish Bhobe d6907f0503 clean: Mass renames, format, fixed scheme. 2025-07-02 15:46:02 +02:00
Anish Bhobe 6d19360f3f Re-add missing resource files. 2025-07-02 13:32:13 +02:00
Anish Bhobe 490d8b7f95 clean: RID and Resource creation. 2025-07-02 13:14:38 +02:00
Anish Bhobe 751af977ac File Hierarchy Reorganization. 2025-07-02 09:30:11 +02:00
61 changed files with 3762 additions and 3713 deletions

View File

@@ -21,6 +21,7 @@ AlwaysBreakAfterDefinitionReturnType: false
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: false
BinPackParameters: false
PackConstructorInitializers: CurrentLine
BraceWrapping:
AfterCaseLabel: true
AfterClass: true
@@ -42,7 +43,8 @@ BraceWrapping:
SplitEmptyNamespace: false
BreakConstructorInitializers: BeforeComma
ConstructorInitializerIndentWidth: 2
Cpp11BracedListStyle: false
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
IncludeCategories:
- Regex: ^<.*
Priority: 1
@@ -70,4 +72,4 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true
SpaceInEmptyParentheses: false
SpacesInConditionalStatement: true
SpacesInCStyleCastParentheses: false
AlignArrayOfStructures: Right
AlignArrayOfStructures: Left

View File

@@ -3,7 +3,7 @@ Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.13.36105.23
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Blaze", "Blaze.vcxproj", "{92E725FE-D87B-4FDE-8371-5B2CE60945FD}"
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Blaze", "Blaze\Blaze.vcxproj", "{92E725FE-D87B-4FDE-8371-5B2CE60945FD}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution

View File

@@ -1,4 +1,27 @@
<wpf:ResourceDictionary xml:space="preserve" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:ss="urn:shemas-jetbrains-com:settings-storage-xaml" xmlns:wpf="http://schemas.microsoft.com/winfx/2006/xaml/presentation">
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Class_0020and_0020struct_0020methods/@EntryIndexedValue">&lt;NamingElement Priority="10"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="member function" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="aaBb"&gt;&lt;ExtraRule Prefix="" Suffix="" Style="aa_bb" /&gt;&lt;/Policy&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Local_0020variables/@EntryIndexedValue">&lt;NamingElement Priority="7"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="local variable" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Parameters/@EntryIndexedValue">&lt;NamingElement Priority="6"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="function parameter" /&gt;&lt;type Name="lambda parameter" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /&gt;&lt;/NamingElement&gt;</s:String></wpf:ResourceDictionary>
<s:Boolean x:Key="/Default/CodeStyle/CodeFormatting/CppClangFormat/EnableClangFormatSupport/@EntryValue">True</s:Boolean>
<s:String x:Key="/Default/CodeStyle/CodeFormatting/CppClangFormat/ExecutableToUse/@EntryValue">FromPath</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Abbreviations/=ID/@EntryIndexedValue">ID</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Class_0020and_0020struct_0020fields/@EntryIndexedValue">&lt;NamingElement Priority="11"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="class field" /&gt;&lt;type Name="struct field" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="m_" Suffix="" Style="aaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Class_0020and_0020struct_0020methods/@EntryIndexedValue">&lt;NamingElement Priority="10"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="member function" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb"&gt;&lt;ExtraRule Prefix="" Suffix="" Style="aa_bb" /&gt;&lt;/Policy&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Class_0020and_0020struct_0020public_0020fields/@EntryIndexedValue">&lt;NamingElement Priority="12"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="PUBLIC"&gt;&lt;type Name="class field" /&gt;&lt;type Name="struct field" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Classes_0020and_0020structs/@EntryIndexedValue">&lt;NamingElement Priority="1"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="__interface" /&gt;&lt;type Name="class" /&gt;&lt;type Name="enum" /&gt;&lt;type Name="struct" /&gt;&lt;type Name="union" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Concepts/@EntryIndexedValue">&lt;NamingElement Priority="2"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE" /&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="aa_bb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Enum_0020members/@EntryIndexedValue">&lt;NamingElement Priority="14"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="scoped enumerator" /&gt;&lt;type Name="unscoped enumerator" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="k" Suffix="" Style="AaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Enums/@EntryIndexedValue">&lt;NamingElement Priority="3"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="enum" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Global_0020constants/@EntryIndexedValue">&lt;NamingElement Priority="16"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="True" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="global variable" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AA_BB" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Global_0020functions/@EntryIndexedValue">&lt;NamingElement Priority="9"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="global function" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Global_0020variables/@EntryIndexedValue">&lt;NamingElement Priority="8"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="global variable" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="g_" Suffix="" Style="AaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Macros/@EntryIndexedValue">&lt;NamingElement Priority="19"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="macro" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb"&gt;&lt;ExtraRule Prefix="" Suffix="" Style="AA_BB" /&gt;&lt;/Policy&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Namespaces/@EntryIndexedValue">&lt;NamingElement Priority="17"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="namespace" /&gt;&lt;type Name="namespace alias" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Other_0020constants/@EntryIndexedValue">&lt;NamingElement Priority="15"&gt;&lt;Descriptor Static="True" Constexpr="Indeterminate" Const="True" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="class field" /&gt;&lt;type Name="local variable" /&gt;&lt;type Name="struct field" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="k" Suffix="" Style="AaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Typedefs/@EntryIndexedValue">&lt;NamingElement Priority="18"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="type alias" /&gt;&lt;type Name="typedef" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb"&gt;&lt;ExtraRule Prefix="" Suffix="" Style="aa_bb" /&gt;&lt;/Policy&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Union_0020members/@EntryIndexedValue">&lt;NamingElement Priority="13"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="union member" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="aaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/Naming/CppNaming/Rules/=Unions/@EntryIndexedValue">&lt;NamingElement Priority="4"&gt;&lt;Descriptor Static="Indeterminate" Constexpr="Indeterminate" Const="Indeterminate" Volatile="Indeterminate" Accessibility="NOT_APPLICABLE"&gt;&lt;type Name="union" /&gt;&lt;/Descriptor&gt;&lt;Policy Inspect="True" Prefix="" Suffix="" Style="AaBb" /&gt;&lt;/NamingElement&gt;</s:String>
<s:Boolean x:Key="/Default/Environment/InjectedLayers/FileInjectedLayer/=BE90B2EAA41A844C846175EA311A0E8C/@KeyIndexDefined">True</s:Boolean>
<s:String x:Key="/Default/Environment/InjectedLayers/FileInjectedLayer/=BE90B2EAA41A844C846175EA311A0E8C/AbsolutePath/@EntryValue">C:\Users\Eon\source\repos\Blaze\Blaze.sln.DotSettings</s:String>
<s:String x:Key="/Default/Environment/InjectedLayers/FileInjectedLayer/=BE90B2EAA41A844C846175EA311A0E8C/RelativePath/@EntryValue"></s:String>
<s:Boolean x:Key="/Default/Environment/InjectedLayers/InjectedLayerCustomization/=FileBE90B2EAA41A844C846175EA311A0E8C/@KeyIndexDefined">True</s:Boolean>
<s:Double x:Key="/Default/Environment/InjectedLayers/InjectedLayerCustomization/=FileBE90B2EAA41A844C846175EA311A0E8C/RelativePriority/@EntryValue">1</s:Double></wpf:ResourceDictionary>

View File

@@ -1,87 +0,0 @@
#include "AppState.h"
#include <SDL3/SDL_log.h>
#include "EntityManager.h"
#include "GlobalMemory.h"
#include "MiscData.h"
#include "RenderDevice.h"
#include "TextureManager.h"
bool AppState::isInit() const
{
return window and renderDevice and renderDevice->isInit();
}
void AppState::destroy()
{
if ( !isInit() ) return;
renderDevice->waitIdle();
Take( miscData )->destroy( *renderDevice );
Take( entityManager )->destroy();
Take( renderDevice )->destroy();
SDL_DestroyWindow( Take( window ) );
}
AppState::AppState( SDL_Window* window, RenderDevice* renderDevice, EntityManager* entityManager, MiscData* miscData )
: window{ window }
, renderDevice{ renderDevice }
, entityManager{ entityManager }
, miscData{ miscData }
, sprintfBuffer{ 0 }
{}
AppState* AppState_Create( GlobalMemory* memory, uint32_t const width, uint32_t const height )
{
SDL_Window* window =
SDL_CreateWindow( "Blaze Test", static_cast<int>( width ), static_cast<int>( height ), SDL_WINDOW_VULKAN );
if ( !window )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s", SDL_GetError() );
return nullptr;
}
RenderDevice* renderDevice = RenderDevice_Create( memory, { .window = window } );
if ( !renderDevice or !renderDevice->isInit() )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "RenderDevice failed to init" );
SDL_DestroyWindow( window );
return nullptr;
}
EntityManager* entityManager = EntityManager_Create( memory, renderDevice, 1000 );
if ( !entityManager )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "EntityManager failed to init" );
renderDevice->destroy();
SDL_DestroyWindow( window );
return nullptr;
}
auto* miscDataAllocation = memory->allocate( sizeof( MiscData ) );
MiscData* miscData = new ( miscDataAllocation ) MiscData{};
if ( !miscData->init( *renderDevice ) )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "MiscData failed to init" );
entityManager->destroy();
renderDevice->destroy();
SDL_DestroyWindow( window );
return nullptr;
}
auto* allocation = memory->allocate( sizeof( AppState ) );
AppState* appState = new ( allocation ) AppState{ window, renderDevice, entityManager, miscData };
return appState;
}
AppState::~AppState()
{
ASSERT( !isInit() );
}

View File

@@ -1,36 +0,0 @@
#pragma once
#include <cstdint>
struct SDL_Window;
struct GlobalMemory;
struct RenderDevice;
struct EntityManager;
struct TextureManager;
struct MiscData;
struct AppState
{
SDL_Window* window;
RenderDevice* renderDevice;
EntityManager* entityManager;
MiscData* miscData;
char sprintfBuffer[256];
[[nodiscard]] bool isInit() const;
void destroy();
AppState( SDL_Window* window, RenderDevice* renderDevice, EntityManager* entityManager, MiscData* miscData );
AppState( AppState const& other ) = delete;
AppState( AppState&& other ) noexcept = delete;
AppState& operator=( AppState const& other ) = delete;
AppState& operator=( AppState&& other ) noexcept = delete;
~AppState();
};
AppState* AppState_Create( GlobalMemory* memory, uint32_t width, uint32_t height );

BIN
Blaze/Assets/Models/DamagedHelmet.glb (Stored with Git LFS) Normal file

Binary file not shown.

View File

@@ -202,6 +202,46 @@ float3 GetPointLightInfluence(float3 Albedo, float2 MetalRough, float3 Position,
return Contrib;
}
float3 GetDirectionalLightInfluence(float3 Albedo, float2 MetalRough, float3 Position, float3 Normal)
{
if (pfd.lightData.dirLightCount == 0)
return 0.0f.xxx;
float3 ViewDir = normalize(pfd.camera.position.xyz - Position);
float Metallic = MetalRough.r;
float Roughness = MetalRough.g;
// Dielectric F_0 based on LearnOpenGL.
// TODO: Cite
float3 F_0 = 0.04f.xxx;
F_0 = lerp(F_0, Albedo, Metallic);
float3 Contrib = 0.0f;
for (uint i = 0; i < pfd.lightData.dirLightCount; ++i)
{
DirectionalLight Light = pfd.lightData.dirLights[i];
if (Light._padding0 < 0.0f)
continue;
float3 LightDir = -normalize(Light.direction);
// Color Unpack
//float R = (Light.Color & 0xFF000000) >> 24;
//float G = (Light.Color & 0x00FF0000) >> 16;
//float B = (Light.Color & 0x0000FF00) >> 8;
//float3 LightColor = Light.Intensity * float3(R, G, B) * 0.00392156862f; // 0.00392156862 = 1/255
float3 LightColor = Light.color;
Contrib += GetPBRContrib(Albedo, LightColor, ViewDir, Normal, Metallic, Roughness, F_0, LightDir, 1.0f);
}
return Contrib;
}
[shader("fragment")]
float4 FragmentMain(
float4 position : POSITION,
@@ -215,22 +255,12 @@ float4 FragmentMain(
float3 N = pcb.material.getNormal(position.xyz, normal.xyz, tangent, texCoord0);
float2 metalRough = pcb.material.getMetalRough(texCoord0);
let albedo = pcb.material.getAlbedo(texCoord0, vertexColor0);
let viewDir = normalize(position.xyz - pfd.camera.position.xyz);
float4 albedo = pcb.material.getAlbedo(texCoord0, vertexColor0);
float3 viewDir = normalize(position.xyz - pfd.camera.position.xyz);
//float3 f_0 = 0.04f.xxx;
//f_0 = lerp(f_0, albedo.rgb, metalRough.x);
float3 pointContrib = GetPointLightInfluence(albedo.rgb, metalRough, position.xyz, N);
float3 dirContrib = GetDirectionalLightInfluence(albedo.rgb, metalRough, position.xyz, N);
//float3 contrib = 0.0f.xxx;
//for (uint i = 0; i < pfd.lightData.pointLightCount; ++i) {
// PointLight pointlight = pfd.lightData.pointLights[i];
// contrib += pointlight.getInfluence(albedo.rgb, metalRough, viewDir, position.xyz, N, f_0);
//}
let contrib = GetPointLightInfluence(albedo.rgb, metalRough, position.xyz, N);
return float4(pcb.material.getEmissive(texCoord0) + contrib, 1.0f);
return float4(pcb.material.getEmissive(texCoord0) + pointContrib + dirContrib, 1.0f);
}

View File

@@ -1,376 +0,0 @@
// Blaze.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <array>
#include <cassert>
#include <functional>
#include <limits>
#include <span>
#define SDL_MAIN_USE_CALLBACKS 1
#include <SDL3/SDL.h>
#include <SDL3/SDL_filesystem.h>
#include <SDL3/SDL_main.h>
#include <SDL3/SDL_vulkan.h>
#include "VulkanHeader.h"
#include "AppState.h"
#include "EntityManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "MacroUtils.h"
#include "MathUtil.h"
#include "MiscData.h"
#include "RenderDevice.h"
#include "ModelLoader.h"
constexpr uint32_t WIDTH = 1280;
constexpr uint32_t HEIGHT = 720;
constexpr uint32_t NUM_FRAMES = 3;
namespace Blaze::Global
{
GlobalMemory g_Memory;
}
SDL_AppResult SDL_AppInit( void** appstate, int, char** )
{
SDL_Init( SDL_INIT_VIDEO | SDL_INIT_EVENTS );
Blaze::Global::g_Memory.init( 128_MiB );
*appstate = AppState_Create( &Blaze::Global::g_Memory, WIDTH, HEIGHT );
if ( !*appstate ) return SDL_APP_FAILURE;
AppState& appState = *static_cast<AppState*>( *appstate );
Entity const* entity = LoadModel( appState.renderDevice, appState.entityManager, "Assets/Models/DamagedHelmet.glb" );
ASSERT( entity );
std::array pointLight = {
MiscData::PointLight{
.position = { 12.0f, 0.0f, 0.0f },
.range = 12,
.color = { 1.0f, 0.0f, 0.0f },
.attenuation = 1.0f,
},
MiscData::PointLight{
.position = { 0.0f, 3.0f, 0.0f },
.range = 12,
.color = { 12.0f, 12.0f, 12.0f },
.attenuation = 1.0f,
},
MiscData::PointLight{
.position = { 0.0f, 0.0f, -12.0f },
.range = 6,
.color = { 0.0f, 0.0f, 1.0f },
.attenuation = 1.0f,
},
};
appState.miscData->lightData.pointLightCount = static_cast<uint32_t>( pointLight.size() );
appState.renderDevice->bufferManager->writeToBuffer(
appState.miscData->pointLights, std::span{ pointLight.begin(), pointLight.end() } );
memcpy(
appState.miscData->cameraUniformBufferPtr + sizeof( MiscData::CameraData ),
&appState.miscData->lightData,
sizeof appState.miscData->lightData );
return SDL_APP_CONTINUE;
}
SDL_AppResult SDL_AppIterate( void* appstate )
{
AppState& appState = *static_cast<AppState*>( appstate );
RenderDevice& renderDevice = *appState.renderDevice;
EntityManager& entityManager = *appState.entityManager;
MiscData& misc = *appState.miscData;
Frame& currentFrame = renderDevice.frames[renderDevice.frameIndex];
VK_CHECK( vkWaitForFences( renderDevice.device, 1, &currentFrame.frameReadyToReuse, VK_TRUE, UINT32_MAX ) );
// All resources of frame 'frameIndex' are free.
// time calc
uint64_t const previousCounter = misc.previousCounter;
uint64_t const currentCounter = SDL_GetPerformanceCounter();
uint64_t const deltaCount = currentCounter - previousCounter;
uint64_t const perfFreq = SDL_GetPerformanceFrequency();
double const deltaTime = static_cast<double>( deltaCount ) / static_cast<double>( perfFreq );
misc.previousCounter = currentCounter;
{
misc.frameTimeSum -= misc.frameTime[misc.frameTimeWriteHead];
misc.frameTime[misc.frameTimeWriteHead] = deltaTime;
misc.frameTimeSum += deltaTime;
misc.frameTimeWriteHead = ( misc.frameTimeWriteHead + 1 ) % misc.frameTimeEntryCount;
double avgDeltaTime = ( misc.frameTimeSum / misc.frameTimeEntryCount );
double fps = 1.0 / avgDeltaTime;
double avgDeltaTimeMs = 1000.0 * avgDeltaTime;
( void )sprintf_s<256>( appState.sprintfBuffer, "%.2f fps %.2f ms", fps, avgDeltaTimeMs );
SDL_SetWindowTitle( appState.window, appState.sprintfBuffer );
}
for ( Entity& entity : entityManager.iter() )
{
if ( not entity.isRoot() ) continue;
entity.transform.rotation = DirectX::XMQuaternionMultiply(
DirectX::XMQuaternionRotationAxis(
DirectX::XMVectorSet( 0.0f, 1.0f, 0.0f, 0.0f ),
DirectX::XMConvertToRadians( 60.0f ) * static_cast<float>( deltaTime ) ),
entity.transform.rotation );
}
uint32_t currentImageIndex;
VK_CHECK( vkAcquireNextImageKHR(
renderDevice.device,
renderDevice.swapchain,
std::numeric_limits<uint32_t>::max(),
currentFrame.imageAcquiredSemaphore,
nullptr,
&currentImageIndex ) );
// TODO: Resize Swapchain if required.
VK_CHECK( vkResetFences( renderDevice.device, 1, &currentFrame.frameReadyToReuse ) );
VK_CHECK( vkResetCommandPool( renderDevice.device, currentFrame.commandPool, 0 ) );
misc.acquireToRenderBarrier.image = renderDevice.swapchainImages[currentImageIndex];
misc.renderToPresentBarrier.image = renderDevice.swapchainImages[currentImageIndex];
VkCommandBuffer cmd = currentFrame.commandBuffer;
VkCommandBufferBeginInfo constexpr beginInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = 0,
.pInheritanceInfo = nullptr,
};
VkClearColorValue constexpr static BLACK_CLEAR = {
.float32 = { 0.0f, 0.0f, 0.0f, 1.0f },
};
VkClearDepthStencilValue constexpr static DEPTH_STENCIL_CLEAR = {
.depth = 1.0f,
.stencil = 0,
};
VK_CHECK( vkBeginCommandBuffer( cmd, &beginInfo ) );
{
VkRenderingAttachmentInfo const depthAttachmentInfo = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.pNext = nullptr,
.imageView = currentFrame.depthView,
.imageLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
.resolveMode = VK_RESOLVE_MODE_NONE,
.resolveImageView = nullptr,
.resolveImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.clearValue = { .depthStencil = DEPTH_STENCIL_CLEAR },
};
VkRenderingAttachmentInfo const attachmentInfo = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.pNext = nullptr,
.imageView = renderDevice.swapchainViews[currentImageIndex],
.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
.resolveMode = VK_RESOLVE_MODE_NONE,
.resolveImageView = nullptr,
.resolveImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.clearValue = { .color = BLACK_CLEAR },
};
VkRenderingInfo renderingInfo = {
.sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
.pNext = nullptr,
.flags = 0,
.renderArea = { .offset = { 0, 0 }, .extent = renderDevice.swapchainExtent },
.layerCount = 1,
.viewMask = 0,
.colorAttachmentCount = 1,
.pColorAttachments = &attachmentInfo,
.pDepthAttachment = &depthAttachmentInfo,
.pStencilAttachment = nullptr,
};
vkCmdPipelineBarrier2( cmd, &misc.acquireToRenderDependency );
vkCmdBeginRendering( cmd, &renderingInfo );
{
VkViewport viewport = {
.x = 0,
.y = static_cast<float>( renderDevice.swapchainExtent.height ),
.width = static_cast<float>( renderDevice.swapchainExtent.width ),
.height = -static_cast<float>( renderDevice.swapchainExtent.height ),
.minDepth = 0.0f,
.maxDepth = 1.0f,
};
vkCmdSetViewport( cmd, 0, 1, &viewport );
VkRect2D scissor = {
.offset = { 0, 0 },
.extent = renderDevice.swapchainExtent,
};
vkCmdSetScissor( cmd, 0, 1, &scissor );
// Render Something?
vkCmdBindPipeline( cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, misc.meshPipeline );
vkCmdBindDescriptorSets(
cmd,
VK_PIPELINE_BIND_POINT_GRAPHICS,
misc.pipelineLayout,
0,
1,
&renderDevice.textureManager->descriptorSet(),
0,
nullptr );
vkCmdBindDescriptorSets(
cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, misc.pipelineLayout, 1, 1, &misc.descriptorSet, 0, nullptr );
std::function<void( Entity const&, DirectX::XMMATRIX const&, Model const* )> drawEntity =
[&]( Entity const& entity, DirectX::XMMATRIX const& parent, Model const* current )
{
Transform const& localTransform = entity.transform;
DirectX::XMMATRIX worldTransform;
{
worldTransform =
DirectX::XMMatrixAffineTransformation(
localTransform.scale, DirectX::XMVectorZero(), localTransform.rotation, localTransform.translation ) *
parent;
}
if ( not entity.model.isNull() )
{
VkBuffer const vertexBuffer = renderDevice.bufferManager->fetchBuffer( entity.model.vertexBuffer ).value();
VkBuffer const indexBuffer = renderDevice.bufferManager->fetchBuffer( entity.model.indexBuffer ).value();
VkDeviceSize constexpr offset = 0;
vkCmdBindVertexBuffers( cmd, 0, 1, &vertexBuffer, &offset );
vkCmdBindIndexBuffer( cmd, indexBuffer, offset, VK_INDEX_TYPE_UINT32 );
}
vkCmdPushConstants(
cmd, misc.pipelineLayout, VK_SHADER_STAGE_ALL_GRAPHICS, 0, sizeof worldTransform, &worldTransform );
DirectX::XMMATRIX const inverseTransform = XMMatrixInverse( nullptr, worldTransform );
vkCmdPushConstants(
cmd,
misc.pipelineLayout,
VK_SHADER_STAGE_ALL_GRAPHICS,
sizeof worldTransform,
sizeof inverseTransform,
&inverseTransform );
if ( not entity.modelMesh.isNull() )
{
ASSERT( current );
for ( Primitive const& primitive : std::span{ current->primitives.data() + entity.modelMesh.primitiveStart,
entity.modelMesh.primitiveCount } )
{
byte const* materialData = nullptr;
if ( primitive.material != UINT32_MAX )
{
Material const* mat = &current->materials[primitive.material];
materialData = reinterpret_cast<byte const*>( mat );
materialData += Material::GPU_DATA_OFFSET;
}
else
{
materialData = reinterpret_cast<byte const*>( &DEFAULT_MATERIAL );
materialData += Material::GPU_DATA_OFFSET;
}
vkCmdPushConstants(
cmd,
misc.pipelineLayout,
VK_SHADER_STAGE_ALL_GRAPHICS,
2 * sizeof worldTransform,
Material::GPU_DATA_SIZE,
materialData );
vkCmdDrawIndexed( cmd, primitive.indexCount, 1, primitive.indexStart, primitive.vertexOffset, 0 );
}
}
for ( Entity& child : entity.children() )
{
drawEntity( child, worldTransform, entity.model.isNull() ? current : &entity.model );
}
};
for ( Entity const& entity : entityManager.iter() )
{
if ( not entity.isRoot() )
{
continue;
}
drawEntity( entity, DirectX::XMMatrixIdentity(), nullptr );
}
}
vkCmdEndRendering( cmd );
vkCmdPipelineBarrier2( cmd, &misc.renderToPresentDependency );
}
VK_CHECK( vkEndCommandBuffer( cmd ) );
VkPipelineStageFlags stageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
VkSubmitInfo const submitInfo = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr,
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame.imageAcquiredSemaphore,
.pWaitDstStageMask = &stageMask,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
.signalSemaphoreCount = 1,
.pSignalSemaphores = &currentFrame.renderFinishedSemaphore,
};
VK_CHECK( vkQueueSubmit( renderDevice.directQueue, 1, &submitInfo, currentFrame.frameReadyToReuse ) );
VkPresentInfoKHR const presentInfo = {
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
.pNext = nullptr,
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame.renderFinishedSemaphore,
.swapchainCount = 1,
.pSwapchains = &renderDevice.swapchain,
.pImageIndices = &currentImageIndex,
.pResults = nullptr,
};
VK_CHECK( vkQueuePresentKHR( renderDevice.directQueue, &presentInfo ) );
renderDevice.frameIndex = ( renderDevice.frameIndex + 1 ) % NUM_FRAMES;
return SDL_APP_CONTINUE;
}
SDL_AppResult SDL_AppEvent( void*, SDL_Event* event )
{
if ( event->type == SDL_EVENT_QUIT )
{
return SDL_APP_SUCCESS;
}
return SDL_APP_CONTINUE;
}
void SDL_AppQuit( void* appstate, SDL_AppResult )
{
AppState* appState = static_cast<AppState*>( appstate );
if ( appState ) appState->destroy();
Blaze::Global::g_Memory.destroy();
}

View File

@@ -166,8 +166,6 @@
</CustomBuild>
</ItemDefinitionGroup>
<ItemGroup>
<None Include=".clang-format" />
<None Include=".gitignore" />
<CustomBuild Include="Assets\Shaders\Mesh.slang">
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
<ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</ExcludedFromBuild>
@@ -176,6 +174,14 @@
<Message Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Compiling %(Filename).slang</Message>
<Outputs Condition="'$(Configuration)|$(Platform)'=='Release|x64'">%(Filename).spv</Outputs>
</CustomBuild>
<None Include="..\.clang-format" />
<None Include="..\.gitattributes" />
<None Include="..\.gitignore" />
<None Include="..\LICENSE" />
<None Include="..\PLAN.md" />
<None Include="..\README.md" />
<None Include="..\vcpkg-configuration.json" />
<None Include="..\vcpkg.json" />
<None Include="Assets\Shaders\Bindless.slang">
<FileType>Document</FileType>
<Command Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">slangc %(FullPath) -profile sm_6_6 -target module -o %(Filename).slang-module</Command>
@@ -183,50 +189,42 @@
</None>
<None Include="Assets\Shaders\Material.slang" />
<None Include="Assets\Shaders\PBR.slang" />
<None Include="PLAN.md">
<SubType>
</SubType>
</None>
<None Include="README.md" />
<None Include="vcpkg-configuration.json" />
<None Include="vcpkg.json" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="Blaze\AppState.h" />
<ClInclude Include="Blaze\BufferManager.h" />
<ClInclude Include="Blaze\EntityManager.h" />
<ClInclude Include="Blaze\Frame.h" />
<ClInclude Include="Blaze\FreeList.h" />
<ClInclude Include="Blaze\GlobalMemory.h" />
<ClInclude Include="Blaze\MacroUtils.h" />
<ClInclude Include="Blaze\MathUtil.h" />
<ClInclude Include="Blaze\MiscData.h" />
<ClInclude Include="Blaze\ModelLoader.h" />
<ClInclude Include="Blaze\RenderDevice.h" />
<ClInclude Include="Blaze\RID.h" />
<ClInclude Include="Blaze\TextureManager.h" />
<ClInclude Include="Blaze\VulkanHeader.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="Blaze\AppState.cpp" />
<ClCompile Include="Blaze\Blaze.cpp" />
<ClCompile Include="Blaze\BufferManager.cpp" />
<ClCompile Include="Blaze\CgltfImpl.cpp" />
<ClCompile Include="Blaze\EntityManager.cpp" />
<ClCompile Include="Blaze\Frame.cpp" />
<ClCompile Include="Blaze\FreeList.cpp" />
<ClCompile Include="Blaze\GlobalMemory.cpp" />
<ClCompile Include="Blaze\MiscData.cpp" />
<ClCompile Include="Blaze\ModelLoader.cpp" />
<ClCompile Include="Blaze\RenderDevice.cpp" />
<ClCompile Include="Blaze\StbImpl.cpp" />
<ClCompile Include="Blaze\TextureManager.cpp" />
<ClCompile Include="Blaze\VmaImpl.cpp" />
<ClCompile Include="Source\AppState.cpp" />
<ClCompile Include="Source\Blaze.cpp" />
<ClCompile Include="Source\BufferManager.cpp" />
<ClCompile Include="Source\CgltfImpl.cpp" />
<ClCompile Include="Source\EntityManager.cpp" />
<ClCompile Include="Source\Frame.cpp" />
<ClCompile Include="Source\FreeList.cpp" />
<ClCompile Include="Source\GlobalMemory.cpp" />
<ClCompile Include="Source\MiscData.cpp" />
<ClCompile Include="Source\ModelLoader.cpp" />
<ClCompile Include="Source\RenderDevice.cpp" />
<ClCompile Include="Source\StbImpl.cpp" />
<ClCompile Include="Source\TextureManager.cpp" />
<ClCompile Include="Source\VmaImpl.cpp" />
</ItemGroup>
<ItemGroup>
<Image Include="Assets\Textures\container2.png" />
<Image Include="Assets\Textures\wall.jpg" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="Source\AppState.h" />
<ClInclude Include="Source\BufferManager.h" />
<ClInclude Include="Source\EntityManager.h" />
<ClInclude Include="Source\Frame.h" />
<ClInclude Include="Source\FreeList.h" />
<ClInclude Include="Source\GlobalMemory.h" />
<ClInclude Include="Source\MacroUtils.h" />
<ClInclude Include="Source\MiscData.h" />
<ClInclude Include="Source\ModelLoader.h" />
<ClInclude Include="Source\RenderDevice.h" />
<ClInclude Include="Source\RID.h" />
<ClInclude Include="Source\TextureManager.h" />
<ClInclude Include="Source\VulkanHeader.h" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>

View File

@@ -25,6 +25,18 @@
<Filter Include="Resource Files\Config">
<UniqueIdentifier>{1f13daa9-d8c1-4fda-b5a5-09ed652775e7}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\Util">
<UniqueIdentifier>{6b9bb9b7-fdb7-49cc-8427-707aaa6f1188}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files\Render">
<UniqueIdentifier>{62595ae4-a41b-43d1-b506-c6e3409aee5c}</UniqueIdentifier>
</Filter>
<Filter Include="Source Files\Util">
<UniqueIdentifier>{3efea666-256f-4bf9-974f-5d43717b8364}</UniqueIdentifier>
</Filter>
<Filter Include="Source Files\Render">
<UniqueIdentifier>{ad0bf107-cc94-4190-844a-748793cabc17}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<None Include="README.md">
@@ -56,92 +68,51 @@
</None>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Blaze\AppState.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\Frame.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\GlobalMemory.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\MacroUtils.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\MathUtil.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\MiscData.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\RenderDevice.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\EntityManager.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\TextureManager.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\VulkanHeader.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\BufferManager.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\FreeList.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\RID.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Blaze\ModelLoader.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="Blaze\AppState.cpp">
<ClCompile Include="Source\AppState.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\Blaze.cpp">
<ClCompile Include="Source\Blaze.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\Frame.cpp">
<ClCompile Include="Source\EntityManager.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\GlobalMemory.cpp">
<ClCompile Include="Source\MiscData.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\MiscData.cpp">
<ClCompile Include="Source\ModelLoader.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\RenderDevice.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\StbImpl.cpp">
<ClCompile Include="Source\StbImpl.cpp">
<Filter>Source Files\HeaderOnlyImpl</Filter>
</ClCompile>
<ClCompile Include="Blaze\VmaImpl.cpp">
<ClCompile Include="Source\VmaImpl.cpp">
<Filter>Source Files\HeaderOnlyImpl</Filter>
</ClCompile>
<ClCompile Include="Blaze\EntityManager.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\TextureManager.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\BufferManager.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\FreeList.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\ModelLoader.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Blaze\CgltfImpl.cpp">
<ClCompile Include="Source\CgltfImpl.cpp">
<Filter>Source Files\HeaderOnlyImpl</Filter>
</ClCompile>
<ClCompile Include="Source\FreeList.cpp">
<Filter>Source Files\Util</Filter>
</ClCompile>
<ClCompile Include="Source\GlobalMemory.cpp">
<Filter>Source Files\Util</Filter>
</ClCompile>
<ClCompile Include="Source\BufferManager.cpp">
<Filter>Source Files\Render</Filter>
</ClCompile>
<ClCompile Include="Source\TextureManager.cpp">
<Filter>Source Files\Render</Filter>
</ClCompile>
<ClCompile Include="Source\RID.cpp">
<Filter>Source Files\Render</Filter>
</ClCompile>
<ClCompile Include="Source\RenderDevice.cpp">
<Filter>Source Files\Render</Filter>
</ClCompile>
<ClCompile Include="Source\Frame.cpp">
<Filter>Source Files\Render</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<Image Include="Assets\Textures\container2.png">
@@ -156,4 +127,48 @@
<Filter>Resource Files\Shader Files</Filter>
</CustomBuild>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Source\AppState.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Source\EntityManager.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Source\MiscData.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Source\ModelLoader.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Source\MathUtil.h">
<Filter>Header Files\Util</Filter>
</ClInclude>
<ClInclude Include="Source\MacroUtils.h">
<Filter>Header Files\Util</Filter>
</ClInclude>
<ClInclude Include="Source\FreeList.h">
<Filter>Header Files\Util</Filter>
</ClInclude>
<ClInclude Include="Source\GlobalMemory.h">
<Filter>Header Files\Util</Filter>
</ClInclude>
<ClInclude Include="Source\VulkanHeader.h">
<Filter>Header Files\Util</Filter>
</ClInclude>
<ClInclude Include="Source\BufferManager.h">
<Filter>Header Files\Render</Filter>
</ClInclude>
<ClInclude Include="Source\Frame.h">
<Filter>Header Files\Render</Filter>
</ClInclude>
<ClInclude Include="Source\RenderDevice.h">
<Filter>Header Files\Render</Filter>
</ClInclude>
<ClInclude Include="Source\RID.h">
<Filter>Header Files\Render</Filter>
</ClInclude>
<ClInclude Include="Source\TextureManager.h">
<Filter>Header Files\Render</Filter>
</ClInclude>
</ItemGroup>
</Project>

View File

@@ -1,337 +0,0 @@
#include "BufferManager.h"
#include "GlobalMemory.h"
template struct RID<Buffer>;
void BufferManager::destroyBuffer( Buffer& buf )
{
if ( not buf.buffer ) return;
ASSERT( m_pRenderDevice );
uint32_t const index = buf.index;
uint32_t const innerIndex = index & INDEX_MASK;
uint32_t const generation = ( index & GENERATION_MASK ) >> GENERATION_OFFSET;
RenderDevice const& renderDevice = *m_pRenderDevice;
vmaDestroyBuffer( renderDevice.gpuAllocator, Take( buf.buffer ), Take( buf.allocation ) );
buf.size = 0;
buf.mappedData = nullptr;
buf.index = innerIndex | ( generation + 1 ) << GENERATION_OFFSET;
// NOTE: DO NOT EDIT INNER INDEX.
ASSERT( innerIndex == ( buf.index & INDEX_MASK ) and "Index should not be modified" );
ASSERT( buf.index > index and "Generation should increase." );
m_freeList.pushBack( reinterpret_cast<FreeList::Node*>( &buf ) );
--m_count;
}
Buffer& BufferManager::fetchBufferUnchecked( BufferID const& rid )
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const innerIndex = index & INDEX_MASK;
return m_aBuffers[innerIndex];
}
void BufferManager::writeToBufferImpl( BufferID const& rid, void const* data, size_t const size )
{
ASSERT( isValidID( rid ) );
Buffer const& buffer = fetchBufferUnchecked( rid );
ASSERT( size <= buffer.size );
memcpy( buffer.mappedData, data, size );
}
bool BufferManager::isValidID( BufferID const& rid ) const
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const innerIndex = index & INDEX_MASK;
if ( innerIndex > m_capacity ) return false;
return m_aBuffers[innerIndex].index == index;
}
std::optional<BufferID> BufferManager::createVertexBuffer( size_t const size )
{
if ( m_freeList.empty() )
{
return std::nullopt;
}
Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
++m_count;
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
VkBufferCreateInfo const bufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VkBuffer vertexBuffer;
VmaAllocation vertexBufferAllocation;
VK_CHECK( vmaCreateBuffer(
renderDevice.gpuAllocator,
&bufferCreateInfo,
&allocationCreateInfo,
&vertexBuffer,
&vertexBufferAllocation,
&allocationInfo ) );
// NOTE: textureSlot preserves index between uses.
uint32_t index = bufferSlot->index;
new ( bufferSlot ) Buffer{
.buffer = vertexBuffer,
.allocation = vertexBufferAllocation,
.mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
.deviceAddress = 0,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create TextureID;
return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
std::optional<BufferID> BufferManager::createIndexBuffer( size_t size )
{
if ( m_freeList.empty() )
{
return std::nullopt;
}
Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
++m_count;
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
VkBufferCreateInfo const bufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VkBuffer indexBuffer;
VmaAllocation indexBufferAllocation;
VK_CHECK( vmaCreateBuffer(
renderDevice.gpuAllocator,
&bufferCreateInfo,
&allocationCreateInfo,
&indexBuffer,
&indexBufferAllocation,
&allocationInfo ) );
// NOTE: bufferSlot preserves index between uses.
uint32_t index = bufferSlot->index;
new ( bufferSlot ) Buffer{
.buffer = indexBuffer,
.allocation = indexBufferAllocation,
.mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
.deviceAddress = 0,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create BufferID;
return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
std::optional<BufferID> BufferManager::createStorageBuffer( size_t size )
{
if ( m_freeList.empty() )
{
return std::nullopt;
}
Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
++m_count;
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
VkBufferCreateInfo const bufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VkBuffer storageBuffer;
VmaAllocation storageBufferAllocation;
VK_CHECK( vmaCreateBuffer(
renderDevice.gpuAllocator,
&bufferCreateInfo,
&allocationCreateInfo,
&storageBuffer,
&storageBufferAllocation,
&allocationInfo ) );
VkBufferDeviceAddressInfo const deviceAddressInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
.pNext = nullptr,
.buffer = storageBuffer,
};
VkDeviceAddress const deviceAddress = vkGetBufferDeviceAddress( renderDevice.device, &deviceAddressInfo );
// NOTE: bufferSlot preserves index between uses.
uint32_t index = bufferSlot->index;
new ( bufferSlot ) Buffer{
.buffer = storageBuffer,
.allocation = storageBufferAllocation,
.mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
.deviceAddress = deviceAddress,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create BufferID;
return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
void BufferManager::freeBuffer( BufferID&& rid )
{
if ( not isValidID( rid ) ) return;
Buffer& buffer = fetchBufferUnchecked( rid );
destroyBuffer( buffer );
auto _ = std::move( rid );
}
std::optional<VkBuffer> BufferManager::fetchBuffer( BufferID const& rid )
{
if ( not isValidID( rid ) ) return std::nullopt;
return fetchBufferUnchecked( rid ).buffer;
}
std::optional<VkDeviceAddress> BufferManager::fetchDeviceAddress( BufferID const& rid )
{
if ( not isValidID( rid ) ) return std::nullopt;
Buffer const& buffer = fetchBufferUnchecked( rid );
if ( buffer.deviceAddress == 0 ) return std::nullopt;
return buffer.deviceAddress;
}
BufferManager::BufferManager( RenderDevice* pRenderDevice, Buffer* aBuffers, uint32_t const capacity )
: m_pRenderDevice{ pRenderDevice }, m_aBuffers{ aBuffers }, m_count{ 0 }, m_capacity{ capacity }
{
uint32_t i = 0;
for ( Buffer& tex : std::span{ m_aBuffers, m_capacity } )
{
// Default Generation is 1
tex.index = i++ | ( 1 << GENERATION_OFFSET );
m_freeList.pushFront( reinterpret_cast<FreeList::Node*>( &tex ) );
}
}
void BufferManager::destroy()
{
#if defined( _DEBUG )
if ( m_count > 0 )
{
SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%u buffers still allocated.", m_count );
}
#endif
while ( not m_freeList.empty() )
{
Buffer* buf = reinterpret_cast<Buffer*>( m_freeList.popFront() );
memset( buf, 0, sizeof *buf );
}
for ( Buffer& buf : std::span{ m_aBuffers, m_count } )
{
destroyBuffer( buf );
}
}
BufferManager::~BufferManager()
{
ASSERT( not m_aBuffers );
}
BufferManager* BufferManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t maxCount )
{
Buffer* buffers = reinterpret_cast<Buffer*>( mem->allocate( maxCount * sizeof( Buffer ), alignof( Buffer ) ) );
if ( not buffers ) return nullptr;
std::byte* allocation = mem->allocate( sizeof( BufferManager ), alignof( BufferManager ) );
if ( not allocation ) return nullptr;
return new ( allocation ) BufferManager{ renderDevice, buffers, maxCount };
}

View File

@@ -1,87 +0,0 @@
#pragma once
#include <optional>
#include <span>
#include "FreeList.h"
#include "MacroUtils.h"
#include "RID.h"
#include "RenderDevice.h"
#include "VulkanHeader.h"
struct GlobalMemory;
struct RenderDevice;
struct Buffer
{
VkBuffer buffer;
VmaAllocation allocation;
std::byte* mappedData; // Assume the system has ReBAR/SAM enabled.
VkDeviceAddress deviceAddress;
size_t size;
uint32_t index;
};
static_assert( sizeof( Buffer ) > sizeof( FreeList::Node ) and "Buffer is used intrusively by FreeList" );
static_assert(
offsetof( Buffer, index ) >= sizeof( FreeList::Node ) and "Index should not be overwritten even in invalid state" );
extern template struct RID<Buffer>;
using BufferID = RID<Buffer>;
struct BufferManager
{
private:
constexpr static uint32_t INDEX_MASK = 0x0007FFFF;
constexpr static uint32_t GENERATION_MASK = ~INDEX_MASK;
constexpr static uint32_t GENERATION_OFFSET = 19;
static_assert(
( ( GENERATION_MASK >> GENERATION_OFFSET & 0x1 ) == 0x1 ) and
( ( GENERATION_MASK >> ( GENERATION_OFFSET - 1 ) & 0x1 ) != 0x1 ) and "Checks boundary" );
RenderDevice* m_pRenderDevice;
// Texture Manager
Buffer* m_aBuffers;
uint32_t m_count;
uint32_t m_capacity;
FreeList m_freeList;
void destroyBuffer( Buffer& buf );
Buffer& fetchBufferUnchecked( BufferID const& rid );
void writeToBufferImpl( BufferID const& rid, void const* data, size_t size );
public:
[[nodiscard]] bool isValidID( BufferID const& rid ) const;
std::optional<BufferID> createVertexBuffer( size_t size );
std::optional<BufferID> createIndexBuffer( size_t size );
std::optional<BufferID> createStorageBuffer( size_t size );
void freeBuffer( BufferID&& rid );
DEPRECATE_JULY_2025
std::optional<VkBuffer> fetchBuffer( BufferID const& rid );
std::optional<VkDeviceAddress> fetchDeviceAddress( BufferID const& rid );
void writeToBuffer( BufferID const& rid, std::ranges::contiguous_range auto const& data )
{
writeToBufferImpl(
rid,
std::ranges::data( data ),
std::ranges::size( data ) * sizeof( std::ranges::range_value_t<decltype( data )> ) );
}
//
BufferManager( RenderDevice* pRenderDevice, Buffer* aBuffers, uint32_t capacity );
void destroy();
BufferManager( BufferManager const& other ) = delete;
BufferManager( BufferManager&& other ) noexcept = delete;
BufferManager& operator=( BufferManager const& other ) = delete;
BufferManager& operator=( BufferManager&& other ) noexcept = delete;
~BufferManager();
};
BufferManager* BufferManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t maxCount );

View File

@@ -1,175 +0,0 @@
#include "EntityManager.h"
#include <array>
#include "GlobalMemory.h"
#include "RenderDevice.h"
#include "Frame.h"
#include "TextureManager.h"
Entity& EntitySiblingIterable::Iterator::operator++()
{
current = current->nextSibling();
return *current;
}
bool EntitySiblingIterable::Iterator::operator==( Iterator const& other ) const
{
return current == other.current;
}
Entity& EntitySiblingIterable::Iterator::operator*() const
{
return *current;
}
EntitySiblingIterable::Iterator EntitySiblingIterable::begin()
{
return { current };
}
EntitySiblingIterable::Iterator EntitySiblingIterable::end()
{
return {};
}
void Entity::setParent( Entity* parent )
{
ASSERT( parent );
if ( m_parent == parent ) return;
removeParent();
// Insert self into parent.
m_parent = parent;
Entity* oldHead = parent->m_firstChild;
if ( oldHead )
{
// Old head is next after this
this->m_nextSibling = oldHead;
// This is prev to old head
oldHead->m_prevSibling = this;
}
// We are the head now.
m_parent->m_firstChild = this;
}
void Entity::addChild( Entity* child )
{
child->setParent( this );
}
void Entity::removeChild( Entity* child )
{
ASSERT( child );
child->removeParent();
}
void Entity::removeParent()
{
if ( m_parent )
{
// Replace prev of next with prev of self
if ( m_nextSibling ) m_nextSibling->m_prevSibling = m_prevSibling;
// Replace next of prev with next of self
if ( m_prevSibling )
{
m_prevSibling->m_nextSibling = m_nextSibling;
}
else
{
// We are head of chain
m_parent->m_firstChild = m_nextSibling;
}
m_nextSibling = nullptr;
m_prevSibling = nullptr;
m_parent = nullptr;
}
}
EntitySiblingIterable Entity::children() const
{
return { m_firstChild };
}
Entity::Entity( Transform const& transform )
: transform{ transform }
, model{}
, modelMesh{}
, m_parent{ nullptr }
, m_firstChild{ nullptr }
, m_prevSibling{ nullptr }
, m_nextSibling{ nullptr }
, m_flags{ 0 }
{}
Entity* EntityManager::createEntity( Transform const& transform )
{
ASSERT( count < capacity );
Entity& entity = entities[count++];
new ( &entity ) Entity{ transform };
return &entity;
}
void EntityManager::destroyEntity( Entity* entity )
{
ASSERT( entity );
VkDevice const device = pRenderDevice->device;
if ( not entity->model.isNull() )
{
for ( auto& material : entity->model.materials )
{
vkDestroySampler( device, Take( material.sampler ), nullptr );
pRenderDevice->textureManager->freeTexture( std::move( material.albedoTextureID ) );
pRenderDevice->textureManager->freeTexture( std::move( material.normalTextureID ) );
pRenderDevice->textureManager->freeTexture( std::move( material.metalRoughTextureID ) );
pRenderDevice->textureManager->freeTexture( std::move( material.emissiveTextureID ) );
}
pRenderDevice->bufferManager->freeBuffer( std::move( entity->model.vertexBuffer ) );
pRenderDevice->bufferManager->freeBuffer( std::move( entity->model.indexBuffer ) );
entity->model.primitives.clear();
entity->model.materials.clear();
}
entity->modelMesh = { 0, 0 };
}
void EntityManager::destroy()
{
Entity const* end = entities + capacity;
for ( Entity* iter = entities; iter != end; ++iter )
{
destroyEntity( iter );
}
entities = nullptr;
capacity = 0;
count = 0;
}
EntityManager::~EntityManager()
{
assert( !entities );
}
EntityManager* EntityManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t const capacity )
{
Entity* data = reinterpret_cast<Entity*>( mem->allocate( capacity * sizeof( Entity ), alignof( Entity ) ) );
memset( data, 0, capacity * sizeof( Entity ) );
std::byte* alloc = mem->allocate( sizeof( EntityManager ), alignof( EntityManager ) );
return new ( alloc ) EntityManager{ renderDevice, data, capacity };
}

View File

@@ -1,174 +0,0 @@
#include "Frame.h"
#include <SDL3/SDL_log.h>
#include "MacroUtils.h"
#include "RenderDevice.h"
bool Frame::isInit() const
{
return static_cast<bool>( commandPool );
}
Frame::Frame(
VkCommandPool const commandPool,
VkCommandBuffer const commandBuffer,
VkSemaphore const imageAcquiredSemaphore,
VkSemaphore const renderFinishedSemaphore,
VkFence const frameReadyToReuse,
VkImage const depthImage,
VmaAllocation const depthAllocation,
VkImageView const depthView )
: commandPool{ commandPool }
, commandBuffer{ commandBuffer }
, imageAcquiredSemaphore{ imageAcquiredSemaphore }
, renderFinishedSemaphore{ renderFinishedSemaphore }
, frameReadyToReuse{ frameReadyToReuse }
, depthImage{ depthImage }
, depthAllocation{ depthAllocation }
, depthView{ depthView }
{}
void Frame::destroy( RenderDevice const& renderDevice )
{
if ( !isInit() ) return;
VkDevice const device = renderDevice.device;
vkDestroyImageView( device, Take( depthView ), nullptr );
vmaDestroyImage( renderDevice.gpuAllocator, Take( depthImage ), Take( depthAllocation ) );
vkDestroyCommandPool( device, Take( commandPool ), nullptr );
vkDestroyFence( device, Take( frameReadyToReuse ), nullptr );
vkDestroySemaphore( device, Take( imageAcquiredSemaphore ), nullptr );
vkDestroySemaphore( device, Take( renderFinishedSemaphore ), nullptr );
}
Frame::~Frame()
{
// Manual Cleanup Required.
ASSERT( !isInit() );
}
void Frame_Create(
Frame* frame,
VkDevice const device,
VmaAllocator const gpuAllocator,
uint32_t const directQueueFamilyIndex,
VkExtent2D const swapchainExtent )
{
VkCommandPool commandPool;
VkCommandBuffer commandBuffer;
VkSemaphore imageAcquiredSemaphore;
VkSemaphore renderFinishedSemaphore;
VkFence frameReadyToReuse;
VkImage depthImage;
VmaAllocation depthAllocation;
VkImageView depthView;
{
VkCommandPoolCreateInfo const commandPoolCreateInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
.queueFamilyIndex = directQueueFamilyIndex,
};
VK_CHECK( vkCreateCommandPool( device, &commandPoolCreateInfo, nullptr, &commandPool ) );
VkCommandBufferAllocateInfo const commandBufferAllocateInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.pNext = nullptr,
.commandPool = commandPool,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandBufferCount = 1,
};
VK_CHECK( vkAllocateCommandBuffers( device, &commandBufferAllocateInfo, &commandBuffer ) );
VkSemaphoreCreateInfo constexpr semaphoreCreateInfo = {
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
};
VK_CHECK( vkCreateSemaphore( device, &semaphoreCreateInfo, nullptr, &imageAcquiredSemaphore ) );
VK_CHECK( vkCreateSemaphore( device, &semaphoreCreateInfo, nullptr, &renderFinishedSemaphore ) );
VkFenceCreateInfo constexpr fenceCreateInfo = {
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
.pNext = nullptr,
.flags = VK_FENCE_CREATE_SIGNALED_BIT,
};
VK_CHECK( vkCreateFence( device, &fenceCreateInfo, nullptr, &frameReadyToReuse ) );
}
{
VkImageCreateInfo const depthImageCreateInfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.imageType = VK_IMAGE_TYPE_2D,
.format = VK_FORMAT_D32_SFLOAT,
.extent = { swapchainExtent.width, swapchainExtent.height, 1 },
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED
};
VmaAllocationCreateInfo constexpr depthAllocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_GPU_ONLY,
.requiredFlags = 0,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VK_CHECK( vmaCreateImage(
gpuAllocator, &depthImageCreateInfo, &depthAllocationCreateInfo, &depthImage, &depthAllocation, nullptr ) );
VkImageSubresourceRange constexpr subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkComponentMapping constexpr componentMapping = {
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
};
VkImageViewCreateInfo const imageViewCreateInfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = depthImage,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = depthImageCreateInfo.format,
.components = componentMapping,
.subresourceRange = subresourceRange,
};
VK_CHECK( vkCreateImageView( device, &imageViewCreateInfo, nullptr, &depthView ) );
}
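// All resources created; publish the handles into the caller-provided Frame.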
frame->commandPool = commandPool;
frame->commandBuffer = commandBuffer;
frame->imageAcquiredSemaphore = imageAcquiredSemaphore;
frame->renderFinishedSemaphore = renderFinishedSemaphore;
frame->frameReadyToReuse = frameReadyToReuse;
frame->depthImage = depthImage;
frame->depthView = depthView;
frame->depthAllocation = depthAllocation;
}

View File

@ -1,48 +0,0 @@
#pragma once
#include <utility>
#include "VulkanHeader.h"
struct RenderDevice;
struct Frame
{
VkCommandPool commandPool;
VkCommandBuffer commandBuffer;
VkSemaphore imageAcquiredSemaphore;
VkSemaphore renderFinishedSemaphore;
VkFence frameReadyToReuse;
VkImage depthImage;
VmaAllocation depthAllocation;
VkImageView depthView;
[[nodiscard]] bool isInit() const;
Frame(
VkCommandPool commandPool,
VkCommandBuffer commandBuffer,
VkSemaphore imageAcquiredSemaphore,
VkSemaphore renderFinishedSemaphore,
VkFence frameReadyToReuse,
VkImage depthImage,
VmaAllocation depthAllocation,
VkImageView depthView );
void destroy( RenderDevice const& renderDevice );
Frame( Frame const& other ) = delete;
Frame( Frame&& other ) noexcept = delete;
Frame& operator=( Frame const& other ) = delete;
Frame& operator=( Frame&& other ) noexcept = delete;
~Frame();
};
void Frame_Create(
Frame* frame,
VkDevice device,
VmaAllocator gpuAllocator,
uint32_t directQueueFamilyIndex,
VkExtent2D swapchainExtent );

View File

@ -1,72 +0,0 @@
#include "FreeList.h"
FreeList::Iterator& FreeList::Iterator::operator++()
{
pIter = pIter->pNext;
return *this;
}
bool FreeList::Iterator::operator==( Iterator const& other ) const
{
return this->pIter == other.pIter;
}
FreeList::Node& FreeList::Iterator::operator*()
{
return *pIter;
}
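// Sentinel head and tail nodes keep insertion and removal free of null checks.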
FreeList::FreeList() : m_head{ .pNext = &m_tail, .pPrev = nullptr }, m_tail{ .pNext = nullptr, .pPrev = &m_head }
{}
void FreeList::pushBack( Node* pNode )
{
Node* prev = m_tail.pPrev;
// Set prev as previous of pNode
prev->pNext = pNode;
pNode->pPrev = prev;
// Set tail as next of pNode
pNode->pNext = &m_tail;
m_tail.pPrev = pNode;
}
void FreeList::pushFront( Node* pNode )
{
Node* next = m_head.pNext;
// Set next as next of pNode
next->pPrev = pNode;
pNode->pNext = next;
// Set head as prev of pNode
pNode->pPrev = &m_head;
m_head.pNext = pNode;
}
FreeList::Node* FreeList::popFront()
{
ASSERT( not empty() );
Node* element = m_head.pNext;
element->pPrev->pNext = element->pNext;
element->pNext->pPrev = element->pPrev;
return element;
}
bool FreeList::empty() const
{
return m_head.pNext == &m_tail;
}
FreeList::Iterator FreeList::begin()
{
return { m_head.pNext };
}
FreeList::Iterator FreeList::end()
{
return { &m_tail };
}

View File

@ -1,73 +0,0 @@
#include "GlobalMemory.h"
#include <SDL3/SDL_log.h>
void GlobalMemory::init( size_t const size )
{
memory = new std::byte[size];
capacity = size;
available = size;
}
void GlobalMemory::destroy()
{
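// memory is a bump pointer that has advanced; rewind to the original base before freeing.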
std::byte const* originalMemory = memory - ( capacity - available );
delete[] originalMemory;
memory = nullptr;
available = 0;
capacity = 0;
}
std::byte* GlobalMemory::allocate( size_t const size )
{
assert( size <= available && "Not enough space available" );
std::byte* retVal = memory;
memset( retVal, 0, size );
memory += size;
available -= size;
SDL_LogInfo(
SDL_LOG_CATEGORY_SYSTEM,
"ALLOC: %p -> %p (%llu) (avail: %llu)",
reinterpret_cast<void*>( retVal ),
reinterpret_cast<void*>( memory ),
size,
available );
return retVal;
}
std::byte* GlobalMemory::allocate( size_t const size, size_t const alignment )
{
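// Pad the allocation up to the next aligned address; the padding bytes remain consumed by the arena.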
uintptr_t const addr = reinterpret_cast<uintptr_t>( memory );
uintptr_t const foundOffset = addr % alignment;
if ( foundOffset == 0 )
{
return allocate( size );
}
uintptr_t const offset = alignment - foundOffset;
size_t const allocationSize = size + offset;
return offset + allocate( allocationSize );
}
GlobalMemory::State GlobalMemory::getState() const
{
SDL_LogInfo( SDL_LOG_CATEGORY_SYSTEM, "TEMP: %p %llu", reinterpret_cast<void*>( memory ), available );
return {
.memory = memory,
.available = available,
};
}
void GlobalMemory::restoreState( State const& state )
{
ASSERT( memory >= state.memory ); //< Behind top of allocator
ASSERT( memory - ( capacity - available ) <= state.memory ); //< Ahead of start of allocator
SDL_LogInfo( SDL_LOG_CATEGORY_SYSTEM, "RESTORE: %p %llu", reinterpret_cast<void*>( memory ), available );
memory = state.memory;
available = state.available;
}

View File

@ -1,11 +0,0 @@
#pragma once
#include <algorithm>
#include <concepts>
#include <cstddef>
#include <utility>
using byte = std::byte;
template <std::totally_ordered T>
T Clamp( T const val, T const minVal, T const maxVal )
{
return std::min( maxVal, std::max( val, minVal ) );
}

View File

@ -1,806 +0,0 @@
#include "ModelLoader.h"
#include <algorithm>
#include <array>
#include <memory_resource>
#include <string_view>
#include <DirectXMath.h>
#include <SDL3/SDL_log.h>
#include <cgltf.h>
#include <stb_image.h>
#include "EntityManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "MacroUtils.h"
#include "MathUtil.h"
std::optional<TextureID> LoadTexture(
RenderDevice* renderDevice, VkSampler sampler, cgltf_image const& baseColorImage, bool const linear )
{
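// The glTF image either carries its own buffer_view data or references a slice of the parent buffer.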
byte* data;
if ( baseColorImage.buffer_view->data )
{
data = static_cast<byte*>( baseColorImage.buffer_view->data );
}
else
{
data = static_cast<byte*>( baseColorImage.buffer_view->buffer->data ) + baseColorImage.buffer_view->offset;
}
size_t size = baseColorImage.buffer_view->size;
uint32_t width;
uint32_t height;
uint32_t numChannels = 4;
stbi_uc* textureData;
{
int w;
int h;
int nc;
int requestedChannels = static_cast<int>( numChannels );
textureData = stbi_load_from_memory(
reinterpret_cast<stbi_uc const*>( data ), static_cast<int>( size ), &w, &h, &nc, requestedChannels );
ASSERT( nc <= requestedChannels );
if ( not textureData )
{
return std::nullopt;
}
width = static_cast<uint32_t>( w );
height = static_cast<uint32_t>( h );
}
auto textureOpt = renderDevice->textureManager->createTexture(
{ width, height, 1 }, sampler, linear ? VK_FORMAT_R8G8B8A8_UNORM : VK_FORMAT_R8G8B8A8_SRGB );
if ( not textureOpt )
{
return std::nullopt;
}
TextureID texture = std::move( textureOpt.value() );
VkImage textureImage = renderDevice->textureManager->fetchImage( texture ).value();
// Staging Buffer Create
VkBuffer stagingBuffer;
VmaAllocation stagingAllocation;
{
VkBufferCreateInfo const stagingBufferCreateInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = static_cast<VkDeviceSize>( width ) * height * numChannels * sizeof( textureData[0] ),
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr stagingAllocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VK_CHECK( vmaCreateBuffer(
renderDevice->gpuAllocator,
&stagingBufferCreateInfo,
&stagingAllocationCreateInfo,
&stagingBuffer,
&stagingAllocation,
&allocationInfo ) );
if ( allocationInfo.pMappedData )
{
memcpy( allocationInfo.pMappedData, textureData, stagingBufferCreateInfo.size );
}
}
// All pixel data has been copied into the staging buffer; the stb image can be released.
stbi_image_free( textureData );
// Staging -> Texture transfer
{
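// The upload borrows the current frame's command buffer; the fence wait below guarantees the GPU has finished with it.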
Frame& frameInUse = renderDevice->frames[renderDevice->frameIndex];
// This should just pass.
VK_CHECK( vkWaitForFences( renderDevice->device, 1, &frameInUse.frameReadyToReuse, VK_TRUE, INT64_MAX ) );
// Reset Frame
VK_CHECK( vkResetFences( renderDevice->device, 1, &frameInUse.frameReadyToReuse ) );
VK_CHECK( vkResetCommandPool( renderDevice->device, frameInUse.commandPool, 0 ) );
VkCommandBufferBeginInfo constexpr beginInfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
.pInheritanceInfo = nullptr,
};
uint32_t mipLevels = TextureManager::calculateRequiredMipLevels( width, height, 1 );
VkImageSubresourceRange const subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageMemoryBarrier2 const creationToTransferImageBarrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT,
.srcAccessMask = VK_ACCESS_2_NONE,
.dstStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = renderDevice->textureManager->fetchImage( texture ).value(),
.subresourceRange = subresourceRange,
};
VkDependencyInfo const creationToTransferDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &creationToTransferImageBarrier,
};
std::array transferToReadyImageBarriers{
// transferToReadyImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipLevels - 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
},
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = mipLevels - 1,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
}
};
VkDependencyInfo const transferToReadyDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = static_cast<uint32_t>( transferToReadyImageBarriers.size() ),
.pImageMemoryBarriers = transferToReadyImageBarriers.data(),
};
VkImageSubresourceRange const mipLevelSubresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
std::array prepareNextMipLevelBarriers{
// prepareNextMipLevelSrcImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = mipLevelSubresource,
},
// prepareNextMipLevelDstImageBarrier
VkImageMemoryBarrier2{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = textureImage,
.subresourceRange = mipLevelSubresource,
}
};
VkDependencyInfo const prepareNextMipLevelDependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = static_cast<uint32_t>( prepareNextMipLevelBarriers.size() ),
.pImageMemoryBarriers = prepareNextMipLevelBarriers.data(),
};
vkBeginCommandBuffer( frameInUse.commandBuffer, &beginInfo );
{
VkImageSubresourceLayers imageSubresourceLayers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
// TODO: Ensure `bufferRowLength` and `bufferImageHeight` are not required.
VkBufferImageCopy copyRegion = {
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = imageSubresourceLayers,
.imageOffset = { 0, 0, 0 },
.imageExtent = { width, height, 1 }
};
// Start
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &creationToTransferDependency );
// Staging -> Image L0
vkCmdCopyBufferToImage(
frameInUse.commandBuffer, stagingBuffer, textureImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copyRegion );
prepareNextMipLevelBarriers[0].subresourceRange.baseMipLevel = 0;
prepareNextMipLevelBarriers[1].subresourceRange.baseMipLevel = 1;
int32_t mipSrcWidth = static_cast<int32_t>( width );
int32_t mipSrcHeight = static_cast<int32_t>( height );
int32_t mipDstWidth = std::max( mipSrcWidth / 2, 1 );
int32_t mipDstHeight = std::max( mipSrcHeight / 2, 1 );
VkImageSubresourceLayers constexpr mipSubresourceLayers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageBlit2 imageBlit = {
.sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
.pNext = nullptr,
.srcSubresource = mipSubresourceLayers,
.srcOffsets = { { 0, 0, 0 }, { mipSrcWidth, mipSrcHeight, 1 } },
.dstSubresource = mipSubresourceLayers,
.dstOffsets = { { 0, 0, 0 }, { mipDstWidth, mipDstHeight, 1 } },
};
imageBlit.srcSubresource.mipLevel = 0;
imageBlit.dstSubresource.mipLevel = 1;
imageBlit.srcOffsets[1].x = mipSrcWidth;
imageBlit.srcOffsets[1].y = mipSrcHeight;
imageBlit.dstOffsets[1].x = mipDstWidth;
imageBlit.dstOffsets[1].y = mipDstHeight;
VkBlitImageInfo2 blitInfo = {
.sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
.pNext = nullptr,
.srcImage = textureImage,
.srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.dstImage = textureImage,
.dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.regionCount = 1,
.pRegions = &imageBlit,
.filter = VK_FILTER_LINEAR,
};
// MipMapping
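// Walk down the mip chain: transition the level just written to TRANSFER_SRC, then blit it into the next level at half resolution.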
for ( uint32_t dstMipLevel = 1; dstMipLevel < mipLevels; ++dstMipLevel )
{
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &prepareNextMipLevelDependency );
vkCmdBlitImage2( frameInUse.commandBuffer, &blitInfo );
// Prep for NEXT iteration
mipSrcWidth = mipDstWidth;
mipSrcHeight = mipDstHeight;
mipDstWidth = std::max( mipSrcWidth / 2, 1 );
mipDstHeight = std::max( mipSrcHeight / 2, 1 );
imageBlit.srcSubresource.mipLevel = dstMipLevel;
imageBlit.dstSubresource.mipLevel = dstMipLevel + 1;
imageBlit.srcOffsets[1].x = mipSrcWidth;
imageBlit.srcOffsets[1].y = mipSrcHeight;
imageBlit.dstOffsets[1].x = mipDstWidth;
imageBlit.dstOffsets[1].y = mipDstHeight;
// Prep current mip level as source
prepareNextMipLevelBarriers[0].subresourceRange.baseMipLevel = dstMipLevel;
prepareNextMipLevelBarriers[1].subresourceRange.baseMipLevel = dstMipLevel + 1;
}
// End
vkCmdPipelineBarrier2( frameInUse.commandBuffer, &transferToReadyDependency );
}
vkEndCommandBuffer( frameInUse.commandBuffer );
VkSubmitInfo submitInfo = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr,
.waitSemaphoreCount = 0,
.pWaitSemaphores = nullptr,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &frameInUse.commandBuffer,
.signalSemaphoreCount = 0,
.pSignalSemaphores = nullptr,
};
VK_CHECK( vkQueueSubmit( renderDevice->directQueue, 1, &submitInfo, frameInUse.frameReadyToReuse ) );
// Do not reset this fence here; otherwise the frame would never become available to the main loop again.
VK_CHECK( vkWaitForFences( renderDevice->device, 1, &frameInUse.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
renderDevice->frameIndex = ( renderDevice->frameIndex + 1 ) % renderDevice->getNumFrames();
}
vmaDestroyBuffer( renderDevice->gpuAllocator, stagingBuffer, stagingAllocation );
return texture;
}
// TODO: Cache materials while loading.
uint32_t ProcessMaterial( RenderDevice* renderDevice, Model* model, cgltf_material const& material )
{
ASSERT( material.has_pbr_metallic_roughness );
auto const baseColorFactor = DirectX::XMFLOAT4{ material.pbr_metallic_roughness.base_color_factor };
auto const emissiveFactor = DirectX::XMFLOAT4{
material.emissive_factor[0],
material.emissive_factor[1],
material.emissive_factor[2],
std::max( material.emissive_strength.emissive_strength, 1.0f ),
};
VkSampler sampler = nullptr;
TextureID baseColorTexture;
TextureID normalTexture;
TextureID metalRoughTexture;
TextureID emissiveTexture;
VkSamplerCreateInfo constexpr samplerCreateInfo = {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.magFilter = VK_FILTER_LINEAR,
.minFilter = VK_FILTER_LINEAR,
.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.mipLodBias = 0.0,
.anisotropyEnable = true,
.maxAnisotropy = 1.0f,
.compareEnable = false,
.compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = VK_LOD_CLAMP_NONE,
.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
.unnormalizedCoordinates = false,
};
VK_CHECK( vkCreateSampler( renderDevice->device, &samplerCreateInfo, nullptr, &sampler ) );
if ( material.pbr_metallic_roughness.base_color_texture.texture )
{
cgltf_image const* baseColorImage = material.pbr_metallic_roughness.base_color_texture.texture->image;
auto baseColorTextureOpt = LoadTexture( renderDevice, sampler, *baseColorImage, false );
if ( not baseColorTextureOpt )
{
vkDestroySampler( renderDevice->device, Take( sampler ), nullptr );
return UINT32_MAX;
}
baseColorTexture = std::move( baseColorTextureOpt.value() );
}
if ( material.pbr_metallic_roughness.metallic_roughness_texture.texture )
{
cgltf_image const* metalRoughImage = material.pbr_metallic_roughness.metallic_roughness_texture.texture->image;
auto metalRoughTextureOpt = LoadTexture( renderDevice, sampler, *metalRoughImage, true );
if ( not metalRoughTextureOpt )
{
vkDestroySampler( renderDevice->device, Take( sampler ), nullptr );
renderDevice->textureManager->freeTexture( std::move( baseColorTexture ) );
return UINT32_MAX;
}
metalRoughTexture = std::move( metalRoughTextureOpt.value() );
}
if ( material.normal_texture.texture )
{
cgltf_image const* normalImage = material.normal_texture.texture->image;
auto normalTextureOpt = LoadTexture( renderDevice, sampler, *normalImage, true );
if ( not normalTextureOpt )
{
vkDestroySampler( renderDevice->device, Take( sampler ), nullptr );
renderDevice->textureManager->freeTexture( std::move( metalRoughTexture ) );
renderDevice->textureManager->freeTexture( std::move( baseColorTexture ) );
return UINT32_MAX;
}
normalTexture = std::move( normalTextureOpt.value() );
}
if ( material.emissive_texture.texture )
{
cgltf_image const* emissiveImage = material.emissive_texture.texture->image;
auto emissiveTextureOpt = LoadTexture( renderDevice, sampler, *emissiveImage, true );
if ( not emissiveTextureOpt )
{
vkDestroySampler( renderDevice->device, Take( sampler ), nullptr );
renderDevice->textureManager->freeTexture( std::move( baseColorTexture ) );
renderDevice->textureManager->freeTexture( std::move( normalTexture ) );
renderDevice->textureManager->freeTexture( std::move( metalRoughTexture ) );
return UINT32_MAX;
}
emissiveTexture = std::move( emissiveTextureOpt.value() );
}
float const metallic = material.pbr_metallic_roughness.metallic_factor;
float const roughness = material.pbr_metallic_roughness.roughness_factor;
uint32_t const materialIdx = static_cast<uint32_t>( model->materials.size() );
model->materials.push_back( {
sampler,
baseColorFactor,
emissiveFactor,
std::move( baseColorTexture ),
std::move( normalTexture ),
std::move( metalRoughTexture ),
std::move( emissiveTexture ),
roughness,
metallic,
} );
return materialIdx;
}
void LoadAttribute(
std::vector<Vertex>* pVertices,
int32_t const vertexStart,
std::vector<float>* scratch,
cgltf_attribute const& positionAttr,
size_t const stride,
size_t const offset,
size_t const components )
{
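// Unpack the accessor into a flat float array, then scatter it into the interleaved vertex array at the given member offset and stride.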
size_t const floatCount = cgltf_accessor_unpack_floats( positionAttr.data, nullptr, 0 );
ASSERT( floatCount % components == 0 );
scratch->resize( floatCount );
cgltf_accessor_unpack_floats( positionAttr.data, scratch->data(), scratch->size() );
// Guaranteed to have space for these vertices.
pVertices->resize( vertexStart + floatCount / components );
byte* writePtr = reinterpret_cast<byte*>( pVertices->data() + vertexStart ) + offset;
float const* readPtr = scratch->data();
for ( size_t i = vertexStart; i < pVertices->size(); ++i )
{
memcpy( writePtr, readPtr, components * sizeof( float ) );
readPtr += components;
writePtr += stride;
}
scratch->clear();
}
ModelMesh ProcessMesh(
RenderDevice* renderDevice,
Model* model,
std::vector<Vertex>* pVertices,
std::vector<uint32_t>* pIndices,
cgltf_mesh const& mesh )
{
using namespace std::string_view_literals;
uint32_t const primitiveStart = static_cast<uint32_t>( model->primitives.size() );
uint32_t const primitiveCount = static_cast<uint32_t>( mesh.primitives_count );
cgltf_primitive const* primitives = mesh.primitives;
for ( uint32_t primitiveIndex = 0; primitiveIndex < mesh.primitives_count; ++primitiveIndex )
{
// VertexStart is per-primitive
int32_t const vertexStart = static_cast<int32_t>( pVertices->size() );
cgltf_primitive const& primitive = primitives[primitiveIndex];
ASSERT( primitive.type == cgltf_primitive_type_triangles );
// Index Buffer
size_t const indexStart = pIndices->size();
size_t const indexCount = cgltf_accessor_unpack_indices( primitive.indices, nullptr, sizeof pIndices->at( 0 ), 0 );
ASSERT( indexCount > 0 );
pIndices->resize( indexStart + indexCount );
cgltf_accessor_unpack_indices(
primitive.indices, pIndices->data() + indexStart, sizeof pIndices->at( 0 ), indexCount );
// Material
uint32_t materialIdx = UINT32_MAX;
if ( primitive.material )
{
materialIdx = ProcessMaterial( renderDevice, model, *primitive.material );
}
model->primitives.push_back( Primitive{
.indexStart = static_cast<uint32_t>( indexStart ),
.indexCount = static_cast<uint32_t>( indexCount ),
.material = materialIdx,
.vertexOffset = vertexStart,
} );
std::vector<float> scratch;
cgltf_attribute const* attributes = primitive.attributes;
for ( uint32_t attribIndex = 0; attribIndex < primitive.attributes_count; ++attribIndex )
{
if ( "POSITION"sv == attributes[attribIndex].name )
{
cgltf_attribute const& positionAttr = attributes[attribIndex];
ASSERT( positionAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( positionAttr.data->type == cgltf_type_vec3 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, position );
size_t constexpr components = 3;
LoadAttribute( pVertices, vertexStart, &scratch, positionAttr, stride, offset, components );
}
if ( "NORMAL"sv == attributes[attribIndex].name )
{
cgltf_attribute const& normalAttr = attributes[attribIndex];
ASSERT( normalAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( normalAttr.data->type == cgltf_type_vec3 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, normal );
size_t constexpr components = 3;
LoadAttribute( pVertices, vertexStart, &scratch, normalAttr, stride, offset, components );
}
if ( "TANGENT"sv == attributes[attribIndex].name )
{
cgltf_attribute const& tangentAttr = attributes[attribIndex];
ASSERT( tangentAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( tangentAttr.data->type == cgltf_type_vec4 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, tangent );
size_t constexpr components = 4;
LoadAttribute( pVertices, vertexStart, &scratch, tangentAttr, stride, offset, components );
}
if ( "TEXCOORD_0"sv == attributes[attribIndex].name )
{
cgltf_attribute const& texCoordAttr = attributes[attribIndex];
ASSERT( texCoordAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( texCoordAttr.data->type == cgltf_type_vec2 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, texCoord0 );
size_t constexpr components = 2;
LoadAttribute( pVertices, vertexStart, &scratch, texCoordAttr, stride, offset, components );
}
if ( "TEXCOORD_1"sv == attributes[attribIndex].name )
{
cgltf_attribute const& texCoordAttr = attributes[attribIndex];
ASSERT( texCoordAttr.data->component_type == cgltf_component_type_r_32f );
ASSERT( texCoordAttr.data->type == cgltf_type_vec2 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, texCoord1 );
size_t constexpr components = 2;
LoadAttribute( pVertices, vertexStart, &scratch, texCoordAttr, stride, offset, components );
}
if ( "COLOR_0"sv == attributes[attribIndex].name )
{
cgltf_attribute const& colorAttr = attributes[attribIndex];
ASSERT( colorAttr.data->component_type == cgltf_component_type_r_32f );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, texCoord1 ); // TODO: COLOR_0 reuses the texCoord1 offset; a vec3/vec4 color does not fit in a vec2 slot.
size_t components = 3;
switch ( colorAttr.data->type )
{
case cgltf_type_vec3:
components = 3;
break;
case cgltf_type_vec4:
components = 4;
break;
default:
UNREACHABLE;
}
LoadAttribute( pVertices, vertexStart, &scratch, colorAttr, stride, offset, components );
}
// TODO: Grab other attributes.
}
}
return { primitiveStart, primitiveCount };
}
Entity* ProcessNode(
RenderDevice* renderDevice,
EntityManager* entityManager,
Model* model,
std::vector<Vertex>* vertices,
std::vector<uint32_t>* indices,
cgltf_node const& node )
{
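// glTF nodes either carry a full matrix (decomposed into TRS here) or individual translation/rotation/scale components.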
DirectX::XMVECTOR vTranslation;
DirectX::XMVECTOR qRotation;
DirectX::XMVECTOR vScale;
if ( node.has_matrix )
{
DirectX::XMMATRIX const mat = DirectX::XMMATRIX{ node.matrix };
ASSERT( DirectX::XMMatrixDecompose( &vScale, &qRotation, &vTranslation, mat ) );
}
else
{
vTranslation = node.has_translation
? DirectX::XMVectorSet( node.translation[0], node.translation[1], node.translation[2], 1.0f )
: DirectX::XMVectorZero();
qRotation = node.has_rotation
? DirectX::XMVectorSet( node.rotation[0], node.rotation[1], node.rotation[2], node.rotation[3] )
: DirectX::XMQuaternionIdentity();
vScale = node.has_scale ? DirectX::XMVectorSet( node.scale[0], node.scale[1], node.scale[2], 1.0f )
: DirectX::XMVectorSplatOne();
}
auto tx = Transform{
.translation = vTranslation,
.rotation = qRotation,
.scale = vScale,
};
Entity* entity = entityManager->createEntity( tx );
if ( node.mesh )
{
entity->modelMesh = ProcessMesh( renderDevice, model, vertices, indices, *node.mesh );
}
for ( uint32_t childIdx = 0; childIdx < node.children_count; ++childIdx )
{
entity->addChild( ProcessNode( renderDevice, entityManager, model, vertices, indices, *node.children[childIdx] ) );
}
return entity;
}
Entity* LoadModel( RenderDevice* renderDevice, EntityManager* entityManager, const char* filename )
{
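// Parse and validate the glTF, load its buffers, build the entity hierarchy, then upload one shared vertex buffer and one shared index buffer for the whole model.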
cgltf_data* gltfModel = nullptr;
cgltf_options options = {};
cgltf_result result = cgltf_parse_file( &options, filename, &gltfModel );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s failed to load", filename );
cgltf_free( gltfModel );
return nullptr;
}
result = cgltf_validate( gltfModel );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s is invalid.", filename );
cgltf_free( gltfModel );
return nullptr;
}
result = cgltf_load_buffers( &options, gltfModel, filename );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s buffers failed to load.", filename );
cgltf_free( gltfModel );
return nullptr;
}
Entity* entity = entityManager->createEntity( {
.translation = DirectX::XMVectorZero(),
.rotation = DirectX::XMQuaternionIdentity(),
.scale = DirectX::XMVectorSplatOne(),
} );
// Output data
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
cgltf_scene const* currentScene = gltfModel->scene;
for ( uint32_t nodeIdx = 0; nodeIdx < currentScene->nodes_count; ++nodeIdx )
{
entity->addChild( ProcessNode(
renderDevice, entityManager, &entity->model, &vertices, &indices, *currentScene->nodes[nodeIdx] ) );
}
auto vertexBuffer = renderDevice->bufferManager->createVertexBuffer( vertices.size() * sizeof vertices[0] );
if ( not vertexBuffer )
{
return nullptr;
}
entity->model.vertexBuffer = std::move( vertexBuffer.value() );
renderDevice->bufferManager->writeToBuffer( entity->model.vertexBuffer, vertices );
auto indexBuffer = renderDevice->bufferManager->createIndexBuffer( indices.size() * sizeof indices[0] );
if ( not indexBuffer )
{
return nullptr;
}
entity->model.indexBuffer = std::move( indexBuffer.value() );
renderDevice->bufferManager->writeToBuffer( entity->model.indexBuffer, std::span{ indices } );
cgltf_free( gltfModel );
return entity;
}

View File

View File

@ -1,55 +0,0 @@
#pragma once
#include <cstdint>
template <typename T>
struct RID
{
private:
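// Index 0 is the reserved null handle; a moved-from RID is reset back to it.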
uint32_t m_index = 0;
explicit RID( uint32_t const index ) : m_index{ index }
{}
public:
RID() = default;
// No copy
RID( RID const& ) = delete;
RID& operator=( RID const& ) = delete;
// Move allowed
RID( RID&& other ) noexcept;
RID& operator=( RID&& other ) noexcept;
[[nodiscard]] bool isNull() const
{
return m_index == 0;
}
static RID null()
{
return {};
}
operator bool() const
{
return m_index != 0;
}
};
template <typename T>
RID<T>::RID( RID&& other ) noexcept : m_index{ other.m_index }
{
other.m_index = 0;
}
template <typename T>
RID<T>& RID<T>::operator=( RID&& other ) noexcept
{
if ( this == &other ) return *this;
m_index = other.m_index;
other.m_index = 0;
return *this;
}

View File

@ -1,518 +0,0 @@
#include "RenderDevice.h"
#include "MacroUtils.h"
#include <SDL3/SDL_log.h>
#include <array>
#include <optional>
#include <span>
#include "BufferManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "MathUtil.h"
#include "TextureManager.h"
RenderDevice::~RenderDevice()
{
ASSERT( !isInit() );
}
// TODO: Failure Handling
RenderDevice* RenderDevice_Create( GlobalMemory* mem, RenderDevice::CreateInfo const& createInfo )
{
ASSERT( mem );
ASSERT( createInfo.window );
volkInitialize();
VkInstance instance;
// Create Instance
{
VkApplicationInfo constexpr applicationInfo = {
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
.pNext = nullptr,
.pApplicationName = "Test",
.applicationVersion = VK_MAKE_API_VERSION( 0, 0, 1, 0 ),
.pEngineName = "Blaze",
.engineVersion = VK_MAKE_API_VERSION( 0, 0, 1, 0 ),
.apiVersion = VK_API_VERSION_1_3,
};
uint32_t instanceExtensionCount;
char const* const* instanceExtensions = SDL_Vulkan_GetInstanceExtensions( &instanceExtensionCount );
VkInstanceCreateInfo const instanceCreateInfo = {
.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.pApplicationInfo = &applicationInfo,
.enabledLayerCount = 0,
.ppEnabledLayerNames = nullptr,
.enabledExtensionCount = instanceExtensionCount,
.ppEnabledExtensionNames = instanceExtensions,
};
VK_CHECK( vkCreateInstance( &instanceCreateInfo, nullptr, &instance ) );
volkLoadInstance( instance );
}
VkSurfaceKHR surface;
// Create Surface
ASSERT( SDL_Vulkan_CreateSurface( createInfo.window, instance, nullptr, &surface ) );
VkPhysicalDevice physicalDeviceInUse = nullptr;
VkDevice device = nullptr;
VmaAllocator gpuAllocator = nullptr;
std::optional<uint32_t> directQueueFamilyIndex = std::nullopt;
VkQueue directQueue = nullptr;
// Create Device and Queue
{
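// Select the first non-CPU device exposing Vulkan 1.3 with a single queue family supporting graphics, compute, transfer and present.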
auto tempAllocStart = mem->getState();
uint32_t physicalDeviceCount;
VK_CHECK( vkEnumeratePhysicalDevices( instance, &physicalDeviceCount, nullptr ) );
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "Found %u GPUs", physicalDeviceCount );
VkPhysicalDevice* physicalDevices =
reinterpret_cast<VkPhysicalDevice*>( mem->allocate( sizeof( VkPhysicalDevice ) * physicalDeviceCount ) );
VK_CHECK( vkEnumeratePhysicalDevices( instance, &physicalDeviceCount, physicalDevices ) );
for ( VkPhysicalDevice const physicalDevice : std::span{ physicalDevices, physicalDeviceCount } )
{
auto tempAllocQueueProperties = mem->getState();
VkPhysicalDeviceProperties properties;
vkGetPhysicalDeviceProperties( physicalDevice, &properties );
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "GPU: %s", properties.deviceName );
SDL_LogInfo(
SDL_LOG_CATEGORY_GPU,
"- API Version %d.%d.%d",
VK_API_VERSION_MAJOR( properties.apiVersion ),
VK_API_VERSION_MINOR( properties.apiVersion ),
VK_API_VERSION_PATCH( properties.apiVersion ) );
constexpr static uint32_t API_PATCH_BITS = 0xFFF;
if ( ( properties.apiVersion & ( ~API_PATCH_BITS ) ) < VK_API_VERSION_1_3 )
{
continue;
}
if ( properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_CPU )
{
continue;
}
uint32_t queueFamilyCount;
vkGetPhysicalDeviceQueueFamilyProperties( physicalDevice, &queueFamilyCount, nullptr );
VkQueueFamilyProperties* queueFamilyProperties = reinterpret_cast<VkQueueFamilyProperties*>(
mem->allocate( sizeof( VkQueueFamilyProperties ) * queueFamilyCount ) );
vkGetPhysicalDeviceQueueFamilyProperties( physicalDevice, &queueFamilyCount, queueFamilyProperties );
for ( uint32_t queueFamilyIndex = 0; queueFamilyIndex != queueFamilyCount; ++queueFamilyIndex )
{
VkQueueFamilyProperties const& qfp = queueFamilyProperties[queueFamilyIndex];
bool hasGraphicsSupport = false;
bool hasComputeSupport = false;
bool hasTransferSupport = false;
bool hasPresentSupport = false;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "- Queue [%d]", queueFamilyIndex );
if ( qfp.queueFlags & VK_QUEUE_GRAPHICS_BIT )
{
hasGraphicsSupport = true;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "-- Graphic" );
}
if ( qfp.queueFlags & VK_QUEUE_COMPUTE_BIT )
{
hasComputeSupport = true;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "-- Compute" );
}
if ( qfp.queueFlags & VK_QUEUE_TRANSFER_BIT )
{
hasTransferSupport = true;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "-- Transfer" );
}
VkBool32 isSurfaceSupported;
VK_CHECK(
vkGetPhysicalDeviceSurfaceSupportKHR( physicalDevice, queueFamilyIndex, surface, &isSurfaceSupported ) );
if ( isSurfaceSupported )
{
hasPresentSupport = true;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "-- Present" );
}
if ( hasGraphicsSupport and hasComputeSupport and hasTransferSupport and hasPresentSupport )
{
physicalDeviceInUse = physicalDevice;
directQueueFamilyIndex = queueFamilyIndex;
break;
}
}
mem->restoreState( tempAllocQueueProperties );
}
ASSERT( physicalDeviceInUse );
ASSERT( directQueueFamilyIndex.has_value() );
float priority = 1.0f;
VkDeviceQueueCreateInfo queueCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.queueFamilyIndex = directQueueFamilyIndex.value(),
.queueCount = 1,
.pQueuePriorities = &priority,
};
VkPhysicalDeviceVulkan13Features features13 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,
.pNext = nullptr,
.synchronization2 = true,
.dynamicRendering = true,
};
VkPhysicalDeviceVulkan12Features const features12 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
.pNext = &features13,
.descriptorIndexing = true,
.shaderSampledImageArrayNonUniformIndexing = true,
.shaderStorageImageArrayNonUniformIndexing = true,
.descriptorBindingUniformBufferUpdateAfterBind = true,
.descriptorBindingSampledImageUpdateAfterBind = true,
.descriptorBindingStorageImageUpdateAfterBind = true,
.descriptorBindingUpdateUnusedWhilePending = true,
.descriptorBindingPartiallyBound = true,
.descriptorBindingVariableDescriptorCount = true,
.runtimeDescriptorArray = true,
.bufferDeviceAddress = true,
};
VkPhysicalDeviceFeatures features = {
.depthClamp = true,
.samplerAnisotropy = true,
};
std::array enabledDeviceExtensions = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };
VkDeviceCreateInfo const deviceCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
.pNext = &features12,
.flags = 0,
.queueCreateInfoCount = 1,
.pQueueCreateInfos = &queueCreateInfo,
.enabledLayerCount = 0,
.ppEnabledLayerNames = nullptr,
.enabledExtensionCount = static_cast<uint32_t>( enabledDeviceExtensions.size() ),
.ppEnabledExtensionNames = enabledDeviceExtensions.data(),
.pEnabledFeatures = &features,
};
VK_CHECK( vkCreateDevice( physicalDeviceInUse, &deviceCreateInfo, nullptr, &device ) );
volkLoadDevice( device );
VmaAllocatorCreateInfo allocatorCreateInfo = {
.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT,
.physicalDevice = physicalDeviceInUse,
.device = device,
.preferredLargeHeapBlockSize = 0,
.pAllocationCallbacks = nullptr,
.pDeviceMemoryCallbacks = nullptr,
.pHeapSizeLimit = nullptr,
.pVulkanFunctions = nullptr,
.instance = instance,
.vulkanApiVersion = VK_API_VERSION_1_3,
.pTypeExternalMemoryHandleTypes = nullptr,
};
VmaVulkanFunctions vkFunctions;
VK_CHECK( vmaImportVulkanFunctionsFromVolk( &allocatorCreateInfo, &vkFunctions ) );
allocatorCreateInfo.pVulkanFunctions = &vkFunctions;
VK_CHECK( vmaCreateAllocator( &allocatorCreateInfo, &gpuAllocator ) );
vkGetDeviceQueue( device, directQueueFamilyIndex.value(), 0, &directQueue );
mem->restoreState( tempAllocStart );
}
// Swapchain creation
VkExtent2D swapchainExtent = { createInfo.width, createInfo.height };
VkFormat swapchainFormat = VK_FORMAT_UNDEFINED;
VkSwapchainKHR swapchain;
VkImage* swapchainImages;
VkImageView* swapchainViews;
uint32_t swapchainImageCount;
{
auto tempAllocStart = mem->getState();
VkSurfaceCapabilitiesKHR capabilities;
VK_CHECK( vkGetPhysicalDeviceSurfaceCapabilitiesKHR( physicalDeviceInUse, surface, &capabilities ) );
// Image Count Calculation: at least minImageCount + 1, clamped to maxImageCount (0 means no upper limit).
swapchainImageCount = std::max( 3u, capabilities.minImageCount + 1 );
if ( capabilities.maxImageCount > 0 )
{
swapchainImageCount = std::min( swapchainImageCount, capabilities.maxImageCount );
}
// Image Size calculation
{
auto [minWidth, minHeight] = capabilities.minImageExtent;
auto [maxWidth, maxHeight] = capabilities.maxImageExtent;
swapchainExtent.width = Clamp( swapchainExtent.width, minWidth, maxWidth );
swapchainExtent.height = Clamp( swapchainExtent.height, minHeight, maxHeight );
}
uint32_t surfaceFormatCount;
vkGetPhysicalDeviceSurfaceFormatsKHR( physicalDeviceInUse, surface, &surfaceFormatCount, nullptr );
VkSurfaceFormatKHR* surfaceFormats =
reinterpret_cast<VkSurfaceFormatKHR*>( mem->allocate( sizeof( VkSurfaceFormatKHR ) * surfaceFormatCount ) );
vkGetPhysicalDeviceSurfaceFormatsKHR( physicalDeviceInUse, surface, &surfaceFormatCount, surfaceFormats );
VkSurfaceFormatKHR format = {
.format = VK_FORMAT_UNDEFINED,
.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
};
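// Prefer an 8-bit SRGB format; RGBA8 UNORM in an SRGB color space is kept as a fallback if no SRGB format is offered.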
for ( auto& surfaceFormat : std::span{ surfaceFormats, surfaceFormatCount } )
{
if ( surfaceFormat.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR )
{
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "Color Space SRGB Found %d", surfaceFormat.format );
if ( surfaceFormat.format == VK_FORMAT_R8G8B8A8_SRGB )
{
format = surfaceFormat;
break;
}
if ( surfaceFormat.format == VK_FORMAT_B8G8R8A8_SRGB )
{
format = surfaceFormat;
break;
}
if ( surfaceFormat.format == VK_FORMAT_R8G8B8A8_UNORM )
{
format = surfaceFormat;
}
}
}
ASSERT( format.format != VK_FORMAT_UNDEFINED );
swapchainFormat = format.format;
uint32_t presentModeCount;
vkGetPhysicalDeviceSurfacePresentModesKHR( physicalDeviceInUse, surface, &presentModeCount, nullptr );
VkPresentModeKHR* presentModes =
reinterpret_cast<VkPresentModeKHR*>( mem->allocate( sizeof( VkPresentModeKHR ) * presentModeCount ) );
vkGetPhysicalDeviceSurfacePresentModesKHR( physicalDeviceInUse, surface, &presentModeCount, presentModes );
VkPresentModeKHR presentMode = VK_PRESENT_MODE_FIFO_KHR;
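// FIFO is always available; prefer FIFO_RELAXED if present, otherwise MAILBOX.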
for ( VkPresentModeKHR presentModeIter : std::span{ presentModes, presentModeCount } )
{
if ( presentModeIter == VK_PRESENT_MODE_FIFO_RELAXED_KHR )
{
presentMode = presentModeIter;
break;
}
if ( presentModeIter == VK_PRESENT_MODE_MAILBOX_KHR )
{
presentMode = presentModeIter;
}
}
mem->restoreState( tempAllocStart );
VkSwapchainCreateInfoKHR const swapchainCreateInfo = {
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
.pNext = nullptr,
.flags = 0,
.surface = surface,
.minImageCount = swapchainImageCount,
.imageFormat = format.format,
.imageColorSpace = format.colorSpace,
.imageExtent = swapchainExtent,
.imageArrayLayers = 1,
.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
.presentMode = presentMode,
.clipped = false,
.oldSwapchain = nullptr,
};
VK_CHECK( vkCreateSwapchainKHR( device, &swapchainCreateInfo, nullptr, &swapchain ) );
swapchainImageCount = 0;
vkGetSwapchainImagesKHR( device, swapchain, &swapchainImageCount, nullptr );
swapchainImages = reinterpret_cast<VkImage*>( mem->allocate( sizeof( VkImage ) * swapchainImageCount ) );
vkGetSwapchainImagesKHR( device, swapchain, &swapchainImageCount, swapchainImages );
swapchainViews = reinterpret_cast<VkImageView*>( mem->allocate( sizeof( VkImageView ) * swapchainImageCount ) );
for ( uint32_t i = 0; i != swapchainImageCount; ++i )
{
VkImageViewCreateInfo const viewCreateInfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = swapchainImages[i],
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = format.format,
.components = {
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY
},
.subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
}
};
VK_CHECK( vkCreateImageView( device, &viewCreateInfo, nullptr, &swapchainViews[i] ) );
}
}
// Init frames.
Frame* frames = reinterpret_cast<Frame*>( mem->allocate( sizeof( Frame ) * swapchainImageCount ) );
for ( uint32_t i = 0; i != swapchainImageCount; ++i )
{
Frame_Create( frames + i, device, gpuAllocator, directQueueFamilyIndex.value(), swapchainExtent );
}
std::byte* allocation = mem->allocate( sizeof( RenderDevice ), alignof( RenderDevice ) );
if ( not allocation ) return nullptr;
RenderDevice* renderDevice = new ( allocation ) RenderDevice{
instance,
surface,
physicalDeviceInUse,
device,
gpuAllocator,
directQueue,
directQueueFamilyIndex.value(),
swapchainFormat,
swapchainExtent,
swapchain,
swapchainImages,
swapchainViews,
frames,
swapchainImageCount,
};
TextureManager* textureManager = TextureManager_Create( mem, renderDevice, 10000 );
if ( !textureManager )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "TextureManager failed to init" );
renderDevice->destroy();
return nullptr;
}
renderDevice->textureManager = textureManager;
ASSERT( renderDevice->textureManager );
BufferManager* bufferManager = BufferManager_Create( mem, renderDevice, 10000 );
if ( !bufferManager )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "BufferManager failed to init" );
renderDevice->destroy();
return nullptr;
}
renderDevice->bufferManager = bufferManager;
ASSERT( renderDevice->bufferManager );
return renderDevice;
}
inline bool RenderDevice::isInit() const
{
return instance and device and textureManager;
}
void RenderDevice::destroy()
{
if ( not isInit() ) return;
Take( bufferManager )->destroy();
Take( textureManager )->destroy();
for ( Frame& frame : std::span{ Take( frames ), swapchainImageCount } )
{
frame.destroy( *this );
}
for ( auto const& view : std::span{ Take( swapchainViews ), swapchainImageCount } )
{
vkDestroyImageView( device, view, nullptr );
}
vkDestroySwapchainKHR( device, Take( swapchain ), nullptr );
vmaDestroyAllocator( Take( gpuAllocator ) );
vkDestroyDevice( Take( device ), nullptr );
SDL_Vulkan_DestroySurface( instance, Take( surface ), nullptr );
vkDestroyInstance( Take( instance ), nullptr );
volkFinalize();
}
void RenderDevice::waitIdle() const
{
VK_CHECK( vkDeviceWaitIdle( device ) );
}
uint32_t RenderDevice::getNumFrames() const
{
return swapchainImageCount;
}
RenderDevice::RenderDevice(
VkInstance const instance,
VkSurfaceKHR const surface,
VkPhysicalDevice const physicalDeviceInUse,
VkDevice const device,
VmaAllocator const gpuAllocator,
VkQueue const directQueue,
uint32_t const directQueueFamilyIndex,
VkFormat const swapchainFormat,
VkExtent2D const swapchainExtent,
VkSwapchainKHR const swapchain,
VkImage* swapchainImages,
VkImageView* swapchainViews,
Frame* frames,
uint32_t const swapchainImageCount )
: instance{ instance }
, surface{ surface }
, physicalDeviceInUse{ physicalDeviceInUse }
, device{ device }
, gpuAllocator{ gpuAllocator }
, directQueue{ directQueue }
, directQueueFamilyIndex{ directQueueFamilyIndex }
, swapchainFormat{ swapchainFormat }
, swapchainExtent{ swapchainExtent }
, swapchain{ swapchain }
, swapchainImages{ swapchainImages }
, swapchainViews{ swapchainViews }
, frames{ frames }
, swapchainImageCount{ swapchainImageCount }
, textureManager{ nullptr }
{}

80
Blaze/Source/AppState.cpp Normal file
View File

@ -0,0 +1,80 @@
#include "AppState.h"
#include <SDL3/SDL_log.h>
#include "EntityManager.h"
#include "GlobalMemory.h"
#include "MiscData.h"
#include "RenderDevice.h"
#include "TextureManager.h"
namespace Blaze
{
bool AppState::IsInit() const
{
return window and renderDevice and renderDevice->IsInit();
}
void AppState::Destroy()
{
if ( !IsInit() ) return;
renderDevice->WaitIdle();
Take( miscData )->Destroy( *renderDevice );
Take( entityManager )->Destroy();
Take( renderDevice )->Destroy();
SDL_DestroyWindow( Take( window ) );
}
AppState::AppState(
SDL_Window* window, RenderDevice* render_device, EntityManager* entity_manager, MiscData* misc_data )
: window{ window }
, renderDevice{ render_device }
, entityManager{ entity_manager }
, miscData{ misc_data }
, sprintfBuffer{ 0 }
{}
AppState* AppState::Create( GlobalMemory* memory, uint32_t width, uint32_t height )
{
SDL_Window* window =
SDL_CreateWindow( "Blaze Test", static_cast<int>( width ), static_cast<int>( height ), SDL_WINDOW_VULKAN );
ASSERT( window );
RenderDevice* render_device = RenderDevice::Create( memory, { .window = window } );
ASSERT( render_device );
EntityManager* entity_manager = EntityManager::Create( memory, render_device, 1000 );
ASSERT( entity_manager );
byte* misc_data_allocation = memory->Allocate( sizeof( MiscData ) );
MiscData* misc_data = new ( misc_data_allocation ) MiscData{};
if ( !misc_data->Init( *render_device ) )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "MiscData failed to init" );
entity_manager->Destroy();
render_device->Destroy();
SDL_DestroyWindow( window );
ASSERT( false );
return nullptr;
}
byte* allocation = memory->Allocate( sizeof( AppState ) );
AppState* app_state = new ( allocation ) AppState{ window, render_device, entity_manager, misc_data };
return app_state;
}
#if defined( DTOR_TEST )
AppState::~AppState()
{
ASSERT( not IsInit() );
}
#endif
} // namespace Blaze

36
Blaze/Source/AppState.h Normal file
View File

@ -0,0 +1,36 @@
#pragma once
#include <cstdint>
// ReSharper disable once CppInconsistentNaming
struct SDL_Window;
namespace Blaze
{
struct GlobalMemory;
struct MiscData;
struct RenderDevice;
struct EntityManager;
struct AppState
{
SDL_Window* window;
RenderDevice* renderDevice;
EntityManager* entityManager;
MiscData* miscData;
char sprintfBuffer[256];
[[nodiscard]] bool IsInit() const;
static AppState* Create( GlobalMemory* memory, uint32_t width, uint32_t height );
void Destroy();
AppState( SDL_Window* window, RenderDevice* render_device, EntityManager* entity_manager, MiscData* misc_data );
#if defined( DTOR_TEST )
~AppState();
#endif
};
} // namespace Blaze

391
Blaze/Source/Blaze.cpp Normal file
View File

@ -0,0 +1,391 @@
// Blaze.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <array>
#include <cassert>
#include <functional>
#include <limits>
#include <span>
#define SDL_MAIN_USE_CALLBACKS 1
#include <SDL3/SDL.h>
#include <SDL3/SDL_main.h>
#include <SDL3/SDL_vulkan.h>
#include "VulkanHeader.h"
#include "AppState.h"
#include "EntityManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "MacroUtils.h"
#include "MiscData.h"
#include "RenderDevice.h"
#include "ModelLoader.h"
constexpr uint32_t WIDTH = 1280;
constexpr uint32_t HEIGHT = 720;
constexpr uint32_t NUM_FRAMES = 3;
namespace Blaze::Global
{
GlobalMemory g_Memory;
}
// ReSharper disable once CppInconsistentNaming
SDL_AppResult SDL_AppInit( void** appstate, int, char** )
{
SDL_Init( SDL_INIT_VIDEO | SDL_INIT_EVENTS );
using Blaze::operator""_MiB;
Blaze::Global::g_Memory.Initialize( 128_MiB );
*appstate = Blaze::AppState::Create( &Blaze::Global::g_Memory, WIDTH, HEIGHT );
if ( !*appstate ) return SDL_APP_FAILURE;
Blaze::AppState const& app_state = *static_cast<Blaze::AppState*>( *appstate );
Blaze::Entity const* entity =
LoadModel( app_state.renderDevice, app_state.entityManager, "Assets/Models/DamagedHelmet.glb" );
ASSERT( entity );
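// Hard-coded test lighting: three point lights and one directional light, written straight into the GPU light buffers.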
Blaze::MiscData::PointLight point_light[] = {
{
.position = { 12.0f, 0.0f, 0.0f },
.range = 12,
.color = { 1.0f, 0.0f, 0.0f },
.attenuation = 1.0f,
},
{
.position = { 0.0f, 3.0f, 0.0f },
.range = 12,
.color = { 12.0f, 12.0f, 12.0f },
.attenuation = 1.0f,
},
{
.position = { 0.0f, 0.0f, -12.0f },
.range = 6,
.color = { 0.0f, 0.0f, 1.0f },
.attenuation = 1.0f,
},
};
app_state.miscData->lightData.pointLightCount = _countof( point_light );
app_state.renderDevice->bufferManager->WriteToBuffer( app_state.miscData->pointLights, point_light );
Blaze::MiscData::DirectionalLight dir_light[] = {
{
.direction = { 1.0f, -1.0f, 0.0f },
.color = { 12.0f, 10.0f, 5.0f },
},
};
app_state.miscData->lightData.dirLightCount = _countof( dir_light );
app_state.renderDevice->bufferManager->WriteToBuffer( app_state.miscData->directionalLights, dir_light );
memcpy(
app_state.miscData->cameraUniformBufferPtr + sizeof( Blaze::MiscData::CameraData ),
&app_state.miscData->lightData,
sizeof app_state.miscData->lightData );
return SDL_APP_CONTINUE;
}
// ReSharper disable once CppInconsistentNaming
SDL_AppResult SDL_AppIterate( void* appstate )
{
Blaze::AppState& app_state = *static_cast<Blaze::AppState*>( appstate );
Blaze::RenderDevice& render_device = *app_state.renderDevice;
Blaze::EntityManager& entity_manager = *app_state.entityManager;
Blaze::MiscData& misc = *app_state.miscData;
Blaze::Frame& current_frame = render_device.frames[render_device.frameIndex];
VK_CHECK( vkWaitForFences( render_device.device, 1, &current_frame.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
// All resources of frame 'frameIndex' are free.
// time calc
uint64_t const previous_counter = misc.previousCounter;
uint64_t const current_counter = SDL_GetPerformanceCounter();
uint64_t const delta_count = current_counter - previous_counter;
uint64_t const perf_freq = SDL_GetPerformanceFrequency();
double const delta_time = static_cast<double>( delta_count ) / static_cast<double>( perf_freq );
misc.previousCounter = current_counter;
{
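// Rolling average over the last frameTimeEntryCount frames, reported via the window title.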
misc.frameTimeSum -= misc.frameTime[misc.frameTimeWriteHead];
misc.frameTime[misc.frameTimeWriteHead] = delta_time;
misc.frameTimeSum += delta_time;
misc.frameTimeWriteHead = ( misc.frameTimeWriteHead + 1 ) % misc.frameTimeEntryCount;
double avg_delta_time = ( misc.frameTimeSum / misc.frameTimeEntryCount );
double fps = 1.0 / avg_delta_time;
double avg_delta_time_ms = 1000.0 * avg_delta_time;
( void )sprintf_s<256>( app_state.sprintfBuffer, "%.2f fps %.2f ms", fps, avg_delta_time_ms );
SDL_SetWindowTitle( app_state.window, app_state.sprintfBuffer );
}
for ( Blaze::Entity& entity : entity_manager.Iterate() )
{
if ( not entity.IsRoot() ) continue;
entity.transform.rotation = DirectX::XMQuaternionMultiply(
DirectX::XMQuaternionRotationAxis(
DirectX::XMVectorSet( 0.0f, 1.0f, 0.0f, 0.0f ),
DirectX::XMConvertToRadians( 60.0f ) * static_cast<float>( delta_time ) ),
entity.transform.rotation );
}
uint32_t current_image_index;
VK_CHECK( vkAcquireNextImageKHR(
render_device.device,
render_device.swapchain,
std::numeric_limits<uint64_t>::max(),
current_frame.imageAcquiredSemaphore,
nullptr,
&current_image_index ) );
// TODO: Resize Swapchain if required.
VK_CHECK( vkResetFences( render_device.device, 1, &current_frame.frameReadyToReuse ) );
VK_CHECK( vkResetCommandPool( render_device.device, current_frame.commandPool, 0 ) );
misc.acquireToRenderBarrier.image = render_device.swapchainImages[current_image_index];
misc.renderToPresentBarrier.image = render_device.swapchainImages[current_image_index];
VkCommandBuffer cmd = current_frame.commandBuffer;
VkCommandBufferBeginInfo constexpr begin_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = 0,
.pInheritanceInfo = nullptr,
};
VkClearColorValue constexpr static kBlackClear = {
.float32 = { 0.0f, 0.0f, 0.0f, 1.0f },
};
VkClearDepthStencilValue constexpr static kDepthStencilClear = {
.depth = 1.0f,
.stencil = 0,
};
VK_CHECK( vkBeginCommandBuffer( cmd, &begin_info ) );
{
VkRenderingAttachmentInfo const depth_attachment_info = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.pNext = nullptr,
.imageView = current_frame.depthView,
.imageLayout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
.resolveMode = VK_RESOLVE_MODE_NONE,
.resolveImageView = nullptr,
.resolveImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.clearValue = { .depthStencil = kDepthStencilClear },
};
VkRenderingAttachmentInfo const attachment_info = {
.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
.pNext = nullptr,
.imageView = render_device.swapchainViews[current_image_index],
.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
.resolveMode = VK_RESOLVE_MODE_NONE,
.resolveImageView = nullptr,
.resolveImageLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.clearValue = { .color = kBlackClear },
};
VkRenderingInfo rendering_info = {
.sType = VK_STRUCTURE_TYPE_RENDERING_INFO,
.pNext = nullptr,
.flags = 0,
.renderArea = { .offset = { 0, 0 }, .extent = render_device.swapchainExtent },
.layerCount = 1,
.viewMask = 0,
.colorAttachmentCount = 1,
.pColorAttachments = &attachment_info,
.pDepthAttachment = &depth_attachment_info,
.pStencilAttachment = nullptr,
};
vkCmdPipelineBarrier2( cmd, &misc.acquireToRenderDependency );
vkCmdBeginRendering( cmd, &rendering_info );
{
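// The negative viewport height flips Y (Vulkan clip space is Y-down); y starts at the full height so the flipped viewport still covers the image.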
VkViewport viewport = {
.x = 0,
.y = static_cast<float>( render_device.swapchainExtent.height ),
.width = static_cast<float>( render_device.swapchainExtent.width ),
.height = -static_cast<float>( render_device.swapchainExtent.height ),
.minDepth = 0.0f,
.maxDepth = 1.0f,
};
vkCmdSetViewport( cmd, 0, 1, &viewport );
VkRect2D scissor = {
.offset = { 0, 0 },
.extent = render_device.swapchainExtent,
};
vkCmdSetScissor( cmd, 0, 1, &scissor );
// Render Something?
vkCmdBindPipeline( cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, misc.meshPipeline );
vkCmdBindDescriptorSets(
cmd,
VK_PIPELINE_BIND_POINT_GRAPHICS,
misc.pipelineLayout,
0,
1,
&render_device.textureManager->DescriptorSet(),
0,
nullptr );
vkCmdBindDescriptorSets(
cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, misc.pipelineLayout, 1, 1, &misc.descriptorSet, 0, nullptr );
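// Recursive draw: compose each entity's local TRS with its parent's world matrix, bind the nearest ancestor model's vertex/index buffers, and push per-primitive material constants.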
std::function<void( Blaze::Entity const&, DirectX::XMMATRIX const&, Blaze::Model const* )> draw_entity =
[&]( Blaze::Entity const& entity, DirectX::XMMATRIX const& parent, Blaze::Model const* current )
{
Blaze::Transform const& local_transform = entity.transform;
DirectX::XMMATRIX world_transform;
{
world_transform =
DirectX::XMMatrixAffineTransformation(
local_transform.scale, DirectX::XMVectorZero(), local_transform.rotation, local_transform.translation ) *
parent;
}
if ( not entity.model.IsNull() )
{
VkBuffer const vertex_buffer = render_device.bufferManager->FetchBuffer( entity.model.vertexBuffer ).value();
VkBuffer const index_buffer = render_device.bufferManager->FetchBuffer( entity.model.indexBuffer ).value();
VkDeviceSize constexpr offset = 0;
vkCmdBindVertexBuffers( cmd, 0, 1, &vertex_buffer, &offset );
vkCmdBindIndexBuffer( cmd, index_buffer, offset, VK_INDEX_TYPE_UINT32 );
}
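// Push constant layout: world matrix at offset 0, inverse world matrix immediately after, then the material's GPU data (pushed per primitive below).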
vkCmdPushConstants(
cmd, misc.pipelineLayout, VK_SHADER_STAGE_ALL_GRAPHICS, 0, sizeof world_transform, &world_transform );
DirectX::XMMATRIX const inverse_transform = XMMatrixInverse( nullptr, world_transform );
vkCmdPushConstants(
cmd,
misc.pipelineLayout,
VK_SHADER_STAGE_ALL_GRAPHICS,
sizeof world_transform,
sizeof inverse_transform,
&inverse_transform );
if ( not entity.modelMesh.IsNull() )
{
ASSERT( current );
for ( Blaze::Primitive const& primitive : std::span{
current->primitives.data() + entity.modelMesh.primitiveStart, entity.modelMesh.primitiveCount } )
{
Blaze::byte const* material_data;
if ( primitive.material != UINT32_MAX )
{
Blaze::Material const* mat = &current->materials[primitive.material];
material_data = reinterpret_cast<Blaze::byte const*>( mat );
material_data += Blaze::Material::kGPUDataOffset;
}
else
{
material_data = reinterpret_cast<Blaze::byte const*>( &Blaze::DEFAULT_MATERIAL );
material_data += Blaze::Material::kGPUDataOffset;
}
vkCmdPushConstants(
cmd,
misc.pipelineLayout,
VK_SHADER_STAGE_ALL_GRAPHICS,
2 * sizeof world_transform,
Blaze::Material::kGPUDataSize,
material_data );
vkCmdDrawIndexed( cmd, primitive.indexCount, 1, primitive.indexStart, primitive.vertexOffset, 0 );
}
}
for ( Blaze::Entity& child : entity.IterChildren() )
{
draw_entity( child, world_transform, entity.model.IsNull() ? current : &entity.model );
}
};
for ( Blaze::Entity const& entity : entity_manager.Iterate() )
{
if ( not entity.IsRoot() )
{
continue;
}
draw_entity( entity, DirectX::XMMatrixIdentity(), nullptr );
}
}
vkCmdEndRendering( cmd );
vkCmdPipelineBarrier2( cmd, &misc.renderToPresentDependency );
}
VK_CHECK( vkEndCommandBuffer( cmd ) );
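// Single submit per frame: wait on the acquired image at the colour-attachment stage, signal renderFinishedSemaphore for presentation, and signal frameReadyToReuse so the CPU can recycle this frame.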
VkPipelineStageFlags stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
VkSubmitInfo const submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr,
.waitSemaphoreCount = 1,
.pWaitSemaphores = &current_frame.imageAcquiredSemaphore,
.pWaitDstStageMask = &stage_mask,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
.signalSemaphoreCount = 1,
.pSignalSemaphores = &current_frame.renderFinishedSemaphore,
};
VK_CHECK( vkQueueSubmit( render_device.directQueue, 1, &submit_info, current_frame.frameReadyToReuse ) );
VkPresentInfoKHR const present_info = {
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
.pNext = nullptr,
.waitSemaphoreCount = 1,
.pWaitSemaphores = &current_frame.renderFinishedSemaphore,
.swapchainCount = 1,
.pSwapchains = &render_device.swapchain,
.pImageIndices = &current_image_index,
.pResults = nullptr,
};
VK_CHECK( vkQueuePresentKHR( render_device.directQueue, &present_info ) );
render_device.frameIndex = ( render_device.frameIndex + 1 ) % NUM_FRAMES;
return SDL_APP_CONTINUE;
}
// ReSharper disable once CppInconsistentNaming
SDL_AppResult SDL_AppEvent( void*, SDL_Event* event )
{
if ( event->type == SDL_EVENT_QUIT )
{
return SDL_APP_SUCCESS;
}
return SDL_APP_CONTINUE;
}
// ReSharper disable once CppInconsistentNaming
void SDL_AppQuit( void* appstate, SDL_AppResult )
{
auto* app_state = static_cast<Blaze::AppState*>( appstate );
if ( app_state ) app_state->Destroy();
Blaze::Global::g_Memory.Destroy();
}

View File

@ -0,0 +1,331 @@
#include "BufferManager.h"
#include "GlobalMemory.h"
using Blaze::BufferManager;
void BufferManager::DestroyBuffer( Buffer& buf )
{
if ( not buf.buffer ) return;
ASSERT( m_renderDevice );
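// Unpack the slot index and generation; bumping the generation below invalidates any outstanding BufferID handles to this slot.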
uint32_t const index = buf.index;
uint32_t const inner_index = index & kIndexMask;
uint32_t const generation = ( index & kGenerationMask ) >> kGenerationOffset;
RenderDevice const& render_device = *m_renderDevice;
vmaDestroyBuffer( render_device.gpuAllocator, Take( buf.buffer ), Take( buf.allocation ) );
buf.size = 0;
buf.mappedData = nullptr;
buf.index = inner_index | ( generation + 1 ) << kGenerationOffset;
// NOTE: DO NOT EDIT INNER INDEX.
ASSERT( inner_index == ( buf.index & kIndexMask ) and "Index should not be modified" );
ASSERT( buf.index > index and "Generation should increase." );
m_freeList.PushBack( reinterpret_cast<Util::FreeList::Node*>( &buf ) );
--m_count;
}
Blaze::Buffer& BufferManager::FetchBufferUnchecked( BufferID const& rid )
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const inner_index = index & kIndexMask;
return m_buffers[inner_index];
}
void BufferManager::WriteToBufferImpl( BufferID const& rid, void const* data, size_t const size )
{
ASSERT( IsValidID( rid ) );
Buffer const& buffer = FetchBufferUnchecked( rid );
ASSERT( size <= buffer.size );
memcpy( buffer.mappedData, data, size );
}
bool BufferManager::IsValidID( BufferID const& rid ) const
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const inner_index = index & kIndexMask;
if ( inner_index >= m_capacity ) return false;
return m_buffers[inner_index].index == index;
}
Blaze::BufferID BufferManager::CreateVertexBuffer( size_t const size )
{
ASSERT( not m_freeList.Empty() );
Buffer* buffer_slot = reinterpret_cast<Buffer*>( m_freeList.PopFront() );
++m_count;
ASSERT( m_renderDevice );
RenderDevice const& render_device = *m_renderDevice;
VkBufferCreateInfo const buffer_create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocation_create_info = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocation_info;
VkBuffer vertex_buffer;
VmaAllocation vertex_buffer_allocation;
VK_CHECK( vmaCreateBuffer(
render_device.gpuAllocator,
&buffer_create_info,
&allocation_create_info,
&vertex_buffer,
&vertex_buffer_allocation,
&allocation_info ) );
// NOTE: bufferSlot preserves index between uses.
uint32_t index = buffer_slot->index;
new ( buffer_slot ) Buffer{
.buffer = vertex_buffer,
.allocation = vertex_buffer_allocation,
.mappedData = static_cast<std::byte*>( allocation_info.pMappedData ),
.deviceAddress = 0,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create BufferID;
return *reinterpret_cast<BufferID*>( &index );
}
Blaze::BufferID BufferManager::CreateIndexBuffer( size_t const size )
{
ASSERT( not m_freeList.Empty() );
Buffer* buffer_slot = reinterpret_cast<Buffer*>( m_freeList.PopFront() );
++m_count;
ASSERT( m_renderDevice );
RenderDevice const& render_device = *m_renderDevice;
VkBufferCreateInfo const buffer_create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocation_create_info = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocation_info;
VkBuffer index_buffer;
VmaAllocation index_buffer_allocation;
VK_CHECK( vmaCreateBuffer(
render_device.gpuAllocator,
&buffer_create_info,
&allocation_create_info,
&index_buffer,
&index_buffer_allocation,
&allocation_info ) );
// NOTE: bufferSlot preserves index between uses.
uint32_t index = buffer_slot->index;
new ( buffer_slot ) Buffer{
.buffer = index_buffer,
.allocation = index_buffer_allocation,
.mappedData = static_cast<std::byte*>( allocation_info.pMappedData ),
.deviceAddress = 0,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create BufferID;
return *reinterpret_cast<BufferID*>( &index );
}
Blaze::BufferID BufferManager::CreateStorageBuffer( size_t const size )
{
ASSERT( not m_freeList.Empty() );
Buffer* buffer_slot = reinterpret_cast<Buffer*>( m_freeList.PopFront() );
++m_count;
ASSERT( m_renderDevice );
RenderDevice const& render_device = *m_renderDevice;
VkBufferCreateInfo const buffer_create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = size,
.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocation_create_info = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocation_info;
VkBuffer storage_buffer;
VmaAllocation storage_buffer_allocation;
VK_CHECK( vmaCreateBuffer(
render_device.gpuAllocator,
&buffer_create_info,
&allocation_create_info,
&storage_buffer,
&storage_buffer_allocation,
&allocation_info ) );
VkBufferDeviceAddressInfo const device_address_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
.pNext = nullptr,
.buffer = storage_buffer,
};
VkDeviceAddress const device_address = vkGetBufferDeviceAddress( render_device.device, &device_address_info );
// NOTE: bufferSlot preserves index between uses.
uint32_t index = buffer_slot->index;
new ( buffer_slot ) Buffer{
.buffer = storage_buffer,
.allocation = storage_buffer_allocation,
.mappedData = static_cast<std::byte*>( allocation_info.pMappedData ),
.deviceAddress = device_address,
.size = size,
.index = index,
};
// NOTE: Memory hackery to create BufferID;
return *reinterpret_cast<BufferID*>( &index );
}
void BufferManager::FreeBuffer( BufferID* rid )
{
if ( not IsValidID( *rid ) ) return;
Buffer& buffer = FetchBufferUnchecked( *rid );
DestroyBuffer( buffer );
*rid = {};
}
std::optional<VkBuffer> BufferManager::FetchBuffer( BufferID const& rid )
{
if ( not IsValidID( rid ) ) return std::nullopt;
return FetchBufferUnchecked( rid ).buffer;
}
std::optional<VkDeviceAddress> BufferManager::FetchDeviceAddress( BufferID const& rid )
{
if ( not IsValidID( rid ) ) return std::nullopt;
Buffer const& buffer = FetchBufferUnchecked( rid );
if ( buffer.deviceAddress == 0 ) return std::nullopt;
return buffer.deviceAddress;
}
BufferManager::BufferManager( RenderDevice* render_device, Buffer* buffers, uint32_t const capacity )
: m_renderDevice{ render_device }
, m_buffers{ buffers }
, m_count{ 0 }
, m_capacity{ capacity }
{
uint32_t i = 0;
for ( Buffer& buf : std::span{ m_buffers, m_capacity } )
{
// Default Generation is 1
buf.index = i++ | ( 1 << kGenerationOffset );
m_freeList.PushFront( reinterpret_cast<Util::FreeList::Node*>( &buf ) );
}
}
void BufferManager::Destroy()
{
#if defined( _DEBUG )
if ( m_count > 0 )
{
SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%u buffers still allocated.", m_count );
}
#endif
while ( not m_freeList.Empty() )
{
Buffer* buf = reinterpret_cast<Buffer*>( m_freeList.PopFront() );
memset( buf, 0, sizeof *buf );
}
for ( Buffer& buf : std::span{ m_buffers, m_capacity } )
{
DestroyBuffer( buf );
}
m_buffers = nullptr;
}
BufferManager::~BufferManager()
{
ASSERT( not m_buffers );
}
BufferManager* BufferManager::Create( GlobalMemory* mem, RenderDevice* render_device, uint32_t const max_count )
{
Buffer* buffers = reinterpret_cast<Buffer*>( mem->Allocate( max_count * sizeof( Buffer ), alignof( Buffer ) ) );
if ( not buffers ) return nullptr;
std::byte* allocation = mem->Allocate( sizeof( BufferManager ), alignof( BufferManager ) );
if ( not allocation ) return nullptr;
return new ( allocation ) BufferManager{ render_device, buffers, max_count };
}

View File

@ -0,0 +1,92 @@
#pragma once
#include <optional>
#include <span>
#include "FreeList.h"
#include "MacroUtils.h"
#include "RID.h"
#include "RenderDevice.h"
#include "VulkanHeader.h"
namespace Blaze
{
struct GlobalMemory;
struct RenderDevice;
struct Buffer
{
VkBuffer buffer;
VmaAllocation allocation;
std::byte* mappedData; // Assume the system has ReBAR/SAM enabled.
VkDeviceAddress deviceAddress;
size_t size;
uint32_t index;
};
static_assert( sizeof( Buffer ) > sizeof( Util::FreeList::Node ) and "Buffer is used intrusively by FreeList" );
static_assert(
offsetof( Buffer, index ) >= sizeof( Util::FreeList::Node ) and
"Index should not be overwritten even in invalid state" );
using BufferID = RID<Buffer>;
struct BufferManager
{
private:
constexpr static uint32_t kIndexMask = 0x0007FFFF;
constexpr static uint32_t kGenerationMask = ~kIndexMask;
constexpr static uint32_t kGenerationOffset = 19;
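// A BufferID packs a 19-bit slot index and a 13-bit generation counter into one uint32_t; IsValidID only accepts a handle whose generation matches the slot's.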
static_assert(
( ( kGenerationMask >> kGenerationOffset & 0x1 ) == 0x1 ) and
( ( kGenerationMask >> ( kGenerationOffset - 1 ) & 0x1 ) != 0x1 ) and "Checks boundary" );
RenderDevice* m_renderDevice;
// Buffer Manager
Buffer* m_buffers;
uint32_t m_count;
uint32_t m_capacity;
Util::FreeList m_freeList;
void DestroyBuffer( Buffer& buf );
Buffer& FetchBufferUnchecked( BufferID const& rid );
void WriteToBufferImpl( BufferID const& rid, void const* data, size_t size );
public:
[[nodiscard]] bool IsValidID( BufferID const& rid ) const;
BufferID CreateVertexBuffer( size_t size );
BufferID CreateIndexBuffer( size_t size );
BufferID CreateStorageBuffer( size_t size );
void FreeBuffer( BufferID* rid );
DEPRECATE_JULY_2025
std::optional<VkBuffer> FetchBuffer( BufferID const& rid );
std::optional<VkDeviceAddress> FetchDeviceAddress( BufferID const& rid );
// Utility to directly muck the data
void WriteToBuffer( BufferID const& rid, std::ranges::contiguous_range auto const& data )
{
WriteToBufferImpl(
rid,
std::ranges::data( data ),
std::ranges::size( data ) * sizeof( std::ranges::range_value_t<decltype( data )> ) );
}
static BufferManager* Create( GlobalMemory* mem, RenderDevice* render_device, uint32_t max_count );
void Destroy();
BufferManager( RenderDevice* render_device, Buffer* buffers, uint32_t capacity );
BufferManager( BufferManager const& other ) = delete;
BufferManager( BufferManager&& other ) noexcept = delete;
BufferManager& operator=( BufferManager const& other ) = delete;
BufferManager& operator=( BufferManager&& other ) noexcept = delete;
~BufferManager();
};
} // namespace Blaze

View File

@ -1,3 +1,3 @@
#define CGLTF_IMPLEMENTATION
#include <cgltf.h>
#include <cgltf.h>

View File

@ -0,0 +1,192 @@
#include "EntityManager.h"
#include <array>
#include "GlobalMemory.h"
#include "RenderDevice.h"
#include "Frame.h"
#include "TextureManager.h"
namespace Blaze
{
Entity& EntitySiblingIterable::Iterator::operator++()
{
current = current->NextSibling();
return *current;
}
bool EntitySiblingIterable::Iterator::operator==( Iterator const& other ) const
{
return current == other.current;
}
Entity& EntitySiblingIterable::Iterator::operator*() const
{
return *current;
}
EntitySiblingIterable::Iterator EntitySiblingIterable::begin()
{
return { current };
}
EntitySiblingIterable::Iterator EntitySiblingIterable::end()
{
return {};
}
void Entity::SetParent( Entity* parent )
{
ASSERT( parent );
if ( m_parent == parent ) return;
RemoveParent();
// Insert self into the new parent.
m_parent = parent;
Entity* old_head = parent->m_firstChild;
if ( old_head )
{
// Old head is next after this
this->m_nextSibling = old_head;
// This is prev to old head
old_head->m_prevSibling = this;
}
// We are the head now.
m_parent->m_firstChild = this;
}
void Entity::AddChild( Entity* child )
{
child->SetParent( this );
}
void Entity::RemoveChild( Entity* child )
{
ASSERT( child );
child->RemoveParent();
}
void Entity::RemoveParent()
{
if ( m_parent )
{
// Replace prev of next with prev of self
if ( m_nextSibling ) m_nextSibling->m_prevSibling = m_prevSibling;
// Replace next of prev with next of self
if ( m_prevSibling )
{
m_prevSibling->m_nextSibling = m_nextSibling;
}
else
{
// We are head of chain
m_parent->m_firstChild = m_nextSibling;
}
m_nextSibling = nullptr;
m_prevSibling = nullptr;
m_parent = nullptr;
}
}
EntitySiblingIterable Entity::IterChildren() const
{
return { m_firstChild };
}
Entity::Entity( Transform const& transform )
: transform{ transform }
, model{}
, modelMesh{}
, m_parent{ nullptr }
, m_firstChild{ nullptr }
, m_prevSibling{ nullptr }
, m_nextSibling{ nullptr }
, m_flags{ 0 }
{}
EntityManager::EntityManager( RenderDevice* render_device, Entity* data, uint32_t const capacity )
: m_renderDevice{ render_device }
, m_entities{ data }
, m_count{ 0 }
, m_capacity{ capacity }
{}
EntityManager::Iterable EntityManager::Iterate() const
{
return Iterable{ m_entities, m_count };
}
Entity* EntityManager::CreateEntity( Transform const& transform )
{
ASSERT( m_count < m_capacity );
Entity& entity = m_entities[m_count++];
new ( &entity ) Entity{ transform };
return &entity;
}
void EntityManager::DestroyEntity( Entity* entity )
{
ASSERT( entity );
VkDevice const device = m_renderDevice->device;
if ( not entity->model.IsNull() )
{
for ( Material& material : entity->model.materials )
{
vkDestroySampler( device, Take( material.sampler ), nullptr );
m_renderDevice->textureManager->FreeTexture( &material.albedoTextureID );
m_renderDevice->textureManager->FreeTexture( &material.normalTextureID );
m_renderDevice->textureManager->FreeTexture( &material.metalRoughTextureID );
m_renderDevice->textureManager->FreeTexture( &material.emissiveTextureID );
}
m_renderDevice->bufferManager->FreeBuffer( &entity->model.vertexBuffer );
m_renderDevice->bufferManager->FreeBuffer( &entity->model.indexBuffer );
entity->model.primitives.clear();
entity->model.materials.clear();
}
entity->modelMesh = { 0, 0 };
}
void EntityManager::Destroy()
{
Entity const* end = m_entities + m_capacity;
for ( Entity* iter = m_entities; iter != end; ++iter )
{
DestroyEntity( iter );
}
m_entities = nullptr;
m_capacity = 0;
m_count = 0;
}
EntityManager::~EntityManager()
{
ASSERT( not m_entities );
}
EntityManager* EntityManager::Create( GlobalMemory* mem, RenderDevice* render_device, uint32_t const capacity )
{
Entity* data = reinterpret_cast<Entity*>( mem->Allocate( capacity * sizeof( Entity ), alignof( Entity ) ) );
memset( data, 0, capacity * sizeof( Entity ) );
std::byte* alloc = mem->Allocate( sizeof( EntityManager ), alignof( EntityManager ) );
return new ( alloc ) EntityManager{ render_device, data, capacity };
}
} // namespace Blaze

View File

@ -3,17 +3,15 @@
#include <cstdint>
#include <DirectXMath.h>
#include <span>
#include "VulkanHeader.h"
// TODO: Remove this dependency
#include "BufferManager.h"
#include "ModelLoader.h"
#include "TextureManager.h"
namespace Blaze
{
struct GlobalMemory;
struct Entity;
struct RenderDevice;
struct GlobalMemory;
struct Transform
{
@ -53,31 +51,31 @@ private:
uint64_t m_flags; // FIXME: Wasting space.
public:
[[nodiscard]] bool isRoot() const
[[nodiscard]] bool IsRoot() const
{
return not m_parent;
}
[[nodiscard]] Entity* parent() const
[[nodiscard]] Entity* GetParent() const
{
return m_parent;
}
[[nodiscard]] Entity* nextSibling() const
[[nodiscard]] Entity* NextSibling() const
{
return m_nextSibling;
}
void setParent( Entity* parent );
void SetParent( Entity* parent );
void addChild( Entity* child );
void AddChild( Entity* child );
void removeChild( Entity* child );
void RemoveChild( Entity* child );
// Remove self from parent
void removeParent();
// Remove self from the current parent
void RemoveParent();
[[nodiscard]] EntitySiblingIterable children() const;
[[nodiscard]] EntitySiblingIterable IterChildren() const;
explicit Entity( Transform const& transform );
};
@ -105,28 +103,26 @@ struct EntityManager
}
};
RenderDevice* pRenderDevice;
Entity* entities;
uint32_t count;
uint32_t capacity;
private:
RenderDevice* m_renderDevice;
Entity* m_entities;
uint32_t m_count;
uint32_t m_capacity;
EntityManager( RenderDevice* renderDevice, Entity* data, uint32_t const capacity )
: pRenderDevice{ renderDevice }, entities{ data }, count{ 0 }, capacity{ capacity }
{}
public:
EntityManager( RenderDevice* render_device, Entity* data, uint32_t capacity );
[[nodiscard]] Iterable iter() const
{
return Iterable{ entities, count };
}
[[nodiscard]] Iterable Iterate() const;
// Make Entities return ID, make it a sparse indexing system.
Entity* createEntity( Transform const& transform );
Entity* CreateEntity( Transform const& transform );
void destroyEntity( Entity* entity );
void DestroyEntity( Entity* entity );
void destroy();
static EntityManager* Create( GlobalMemory* mem, RenderDevice* render_device, uint32_t capacity );
void Destroy();
~EntityManager();
};
EntityManager* EntityManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t capacity );
} // namespace Blaze

176
Blaze/Source/Frame.cpp Normal file
View File

@ -0,0 +1,176 @@
#include "Frame.h"
#include <SDL3/SDL_log.h>
#include "MacroUtils.h"
#include "RenderDevice.h"
using Blaze::Frame;
bool Blaze::Frame::IsInit() const
{
return static_cast<bool>( commandPool );
}
Frame::Frame(
VkCommandPool const command_pool,
VkCommandBuffer const command_buffer,
VkSemaphore const image_acquired_semaphore,
VkSemaphore const render_finished_semaphore,
VkFence const frame_ready_to_reuse,
VkImage const depth_image,
VmaAllocation const depth_allocation,
VkImageView const depth_view )
: commandPool{ command_pool }
, commandBuffer{ command_buffer }
, imageAcquiredSemaphore{ image_acquired_semaphore }
, renderFinishedSemaphore{ render_finished_semaphore }
, frameReadyToReuse{ frame_ready_to_reuse }
, depthImage{ depth_image }
, depthAllocation{ depth_allocation }
, depthView{ depth_view }
{}
void Frame::Destroy( RenderDevice const& render_device )
{
if ( !IsInit() ) return;
VkDevice const device = render_device.device;
vkDestroyImageView( device, Take( depthView ), nullptr );
vmaDestroyImage( render_device.gpuAllocator, Take( depthImage ), Take( depthAllocation ) );
vkDestroyCommandPool( device, Take( commandPool ), nullptr );
vkDestroyFence( device, Take( frameReadyToReuse ), nullptr );
vkDestroySemaphore( device, Take( imageAcquiredSemaphore ), nullptr );
vkDestroySemaphore( device, Take( renderFinishedSemaphore ), nullptr );
}
Frame Frame::Create(
VkDevice const device,
VmaAllocator const gpu_allocator,
uint32_t const direct_queue_family_index,
VkExtent2D const swapchain_extent )
{
VkCommandPool command_pool;
VkCommandBuffer command_buffer;
VkSemaphore image_acquired_semaphore;
VkSemaphore render_finished_semaphore;
VkFence frame_ready_to_reuse;
VkImage depth_image;
VmaAllocation depth_allocation;
VkImageView depth_view;
{
VkCommandPoolCreateInfo const command_pool_create_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
.queueFamilyIndex = direct_queue_family_index,
};
VK_CHECK( vkCreateCommandPool( device, &command_pool_create_info, nullptr, &command_pool ) );
VkCommandBufferAllocateInfo const command_buffer_allocate_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.pNext = nullptr,
.commandPool = command_pool,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandBufferCount = 1,
};
VK_CHECK( vkAllocateCommandBuffers( device, &command_buffer_allocate_info, &command_buffer ) );
VkSemaphoreCreateInfo constexpr semaphore_create_info = {
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
};
VK_CHECK( vkCreateSemaphore( device, &semaphore_create_info, nullptr, &image_acquired_semaphore ) );
VK_CHECK( vkCreateSemaphore( device, &semaphore_create_info, nullptr, &render_finished_semaphore ) );
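// The fence starts signaled so the first per-frame wait passes without a prior submission.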
VkFenceCreateInfo constexpr fence_create_info = {
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
.pNext = nullptr,
.flags = VK_FENCE_CREATE_SIGNALED_BIT,
};
VK_CHECK( vkCreateFence( device, &fence_create_info, nullptr, &frame_ready_to_reuse ) );
}
{
VkImageCreateInfo const depth_image_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.imageType = VK_IMAGE_TYPE_2D,
.format = VK_FORMAT_D32_SFLOAT,
.extent = { swapchain_extent.width, swapchain_extent.height, 1 },
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED
};
VmaAllocationCreateInfo constexpr depth_allocation_create_info = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_GPU_ONLY,
.requiredFlags = 0,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VK_CHECK( vmaCreateImage(
gpu_allocator,
&depth_image_create_info,
&depth_allocation_create_info,
&depth_image,
&depth_allocation,
nullptr ) );
VkImageSubresourceRange constexpr subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkComponentMapping constexpr component_mapping = {
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
};
VkImageViewCreateInfo const image_view_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = depth_image,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = depth_image_create_info.format,
.components = component_mapping,
.subresourceRange = subresource_range,
};
VK_CHECK( vkCreateImageView( device, &image_view_create_info, nullptr, &depth_view ) );
}
return Frame{
command_pool,
command_buffer,
image_acquired_semaphore,
render_finished_semaphore,
frame_ready_to_reuse,
depth_image,
depth_allocation,
depth_view,
};
}

39
Blaze/Source/Frame.h Normal file
View File

@ -0,0 +1,39 @@
#pragma once
#include "VulkanHeader.h"
namespace Blaze
{
struct RenderDevice;
struct Frame
{
VkCommandPool commandPool;
VkCommandBuffer commandBuffer;
VkSemaphore imageAcquiredSemaphore;
VkSemaphore renderFinishedSemaphore;
VkFence frameReadyToReuse;
VkImage depthImage;
VmaAllocation depthAllocation;
VkImageView depthView;
[[nodiscard]] bool IsInit() const;
Frame(
VkCommandPool command_pool,
VkCommandBuffer command_buffer,
VkSemaphore image_acquired_semaphore,
VkSemaphore render_finished_semaphore,
VkFence frame_ready_to_reuse,
VkImage depth_image,
VmaAllocation depth_allocation,
VkImageView depth_view );
static Frame Create(
VkDevice device, VmaAllocator gpu_allocator, uint32_t direct_queue_family_index, VkExtent2D swapchain_extent );
void Destroy( RenderDevice const& render_device );
};
} // namespace Blaze

79
Blaze/Source/FreeList.cpp Normal file
View File

@ -0,0 +1,79 @@
#include "FreeList.h"
#include "MacroUtils.h"
namespace Blaze::Util
{
FreeList::Iterator& FreeList::Iterator::operator++()
{
iter = iter->next;
return *this;
}
bool FreeList::Iterator::operator==( Iterator const& other ) const
{
return this->iter == other.iter;
}
FreeList::Node& FreeList::Iterator::operator*() const
{
return *iter;
}
FreeList::FreeList() : m_head{ .next = &m_tail, .prev = nullptr }, m_tail{ .next = nullptr, .prev = &m_head }
{}
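// m_head and m_tail are sentinel nodes; PushBack, PushFront, and PopFront never need to null-check their neighbours.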
void FreeList::PushBack( Node* node )
{
Node* prev = m_tail.prev;
// Set prev as previous of node
prev->next = node;
node->prev = prev;
// Set tail as next of node
node->next = &m_tail;
m_tail.prev = node;
}
void FreeList::PushFront( Node* node )
{
Node* next = m_head.next;
// Set next as next of node
next->prev = node;
node->next = next;
// Set head as prev of node
node->prev = &m_head;
m_head.next = node;
}
FreeList::Node* FreeList::PopFront()
{
ASSERT( not Empty() );
Node* element = m_head.next;
m_head.next = element->next;
element->next->prev = &m_head;
return element;
}
bool FreeList::Empty() const
{
return m_head.next == &m_tail;
}
FreeList::Iterator FreeList::begin()
{
return { m_head.next };
}
FreeList::Iterator FreeList::end()
{
return { &m_tail };
}
} // namespace Blaze::Util

View File

@ -1,21 +1,23 @@
#pragma once
#include "MacroUtils.h"
namespace Blaze::Util
{
struct FreeList
{
struct Node
{
Node* pNext;
Node* pPrev;
Node* next;
Node* prev;
};
struct Iterator
{
Node* pIter;
Node* iter;
Iterator& operator++();
bool operator==( Iterator const& other ) const;
Node& operator*();
Node& operator*() const;
};
private:
@ -25,10 +27,10 @@ private:
public:
FreeList();
void pushBack( Node* pNode );
void pushFront( Node* pNode );
Node* popFront();
[[nodiscard]] bool empty() const;
void PushBack( Node* node );
void PushFront( Node* node );
Node* PopFront();
[[nodiscard]] bool Empty() const;
Iterator begin();
Iterator end();
@ -40,3 +42,5 @@ public:
~FreeList() = default;
};
} // namespace Blaze::Util

View File

@ -0,0 +1,78 @@
#include "GlobalMemory.h"
#include <SDL3/SDL_log.h>
namespace Blaze
{
void GlobalMemory::Initialize( size_t const size )
{
memory = new std::byte[size];
capacity = size;
available = size;
}
void GlobalMemory::Destroy()
{
std::byte const* original_memory = memory - ( capacity - available );
delete[] original_memory;
memory = nullptr;
available = 0;
capacity = 0;
}
std::byte* GlobalMemory::Allocate( size_t const size )
{
ASSERT( size <= available and "Not enough space available" );
std::byte* ret_val = memory;
memset( ret_val, 0, size );
memory += size;
available -= size;
SDL_LogInfo(
SDL_LOG_CATEGORY_SYSTEM,
"ALLOC: %p -> %p (%llu) (avail: %llu)",
reinterpret_cast<void*>( ret_val ),
reinterpret_cast<void*>( memory ),
size,
available );
return ret_val;
}
std::byte* GlobalMemory::Allocate( size_t const size, size_t const alignment )
{
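// Align by padding the current bump pointer up to the requested alignment; the padding bytes are simply consumed by the allocator.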
uintptr_t const addr = reinterpret_cast<uintptr_t>( memory );
uintptr_t const found_offset = addr % alignment;
if ( found_offset == 0 )
{
return Allocate( size );
}
uintptr_t const offset = alignment - found_offset;
size_t const allocation_size = size + offset;
return offset + Allocate( allocation_size );
}
GlobalMemory::State GlobalMemory::GetState() const
{
SDL_LogInfo( SDL_LOG_CATEGORY_SYSTEM, "TEMP: %p %llu", reinterpret_cast<void*>( memory ), available );
return {
.memory = memory,
.available = available,
};
}
void GlobalMemory::RestoreState( State const& state )
{
ASSERT( memory >= state.memory ); //< Behind top of allocator
ASSERT( memory - ( capacity - available ) <= state.memory ); //< Ahead of start of allocator
SDL_LogInfo( SDL_LOG_CATEGORY_SYSTEM, "RESTORE: %p %llu", reinterpret_cast<void*>( memory ), available );
memory = state.memory;
available = state.available;
}
} // namespace Blaze

View File

@ -1,9 +1,9 @@
#pragma once
#include <cstdint>
#include "MacroUtils.h"
namespace Blaze
{
consteval size_t operator""_KiB( size_t const value )
{
return value * 1024;
@ -19,27 +19,30 @@ consteval size_t operator""_GiB( size_t const value )
return value * 1024_MiB;
}
using byte = std::byte;
struct GlobalMemory
{
struct State
{
std::byte* memory;
size_t available;
byte* memory;
size_t available;
};
std::byte* memory;
size_t available;
size_t capacity;
byte* memory;
size_t available;
size_t capacity;
void init( size_t size );
void destroy();
void Initialize( size_t size );
void Destroy();
[[nodiscard]]
std::byte* allocate( size_t size );
byte* Allocate( size_t size );
[[nodiscard]]
std::byte* allocate( size_t size, size_t alignment );
byte* Allocate( size_t size, size_t alignment );
[[nodiscard]]
State getState() const; //< Do not do any permanent allocations after calling this.
void restoreState( State const& state ); //< Call this before permanent allocations.
State GetState() const; //< Do not do any permanent allocations after calling this.
void RestoreState( State const& state ); //< Call this before permanent allocations.
};
} // namespace Blaze

View File

@ -8,38 +8,41 @@
#include "MacroUtils.h"
#include "RenderDevice.h"
bool MiscData::init( RenderDevice const& renderDevice )
namespace Blaze
{
VkDevice const device = renderDevice.device;
bool MiscData::Init( RenderDevice const& render_device )
{
VkDevice const device = render_device.device;
previousCounter = 0;
// Pipeline Creation
{
size_t dataSize;
void* rawData = SDL_LoadFile( "Mesh.spv", &dataSize );
ASSERT( dataSize % 4 == 0 );
size_t data_size;
void* raw_data = SDL_LoadFile( "Mesh.spv", &data_size );
ASSERT( data_size % 4 == 0 );
if ( !rawData )
if ( !raw_data )
{
SDL_LogError( SDL_LOG_CATEGORY_SYSTEM, "%s", SDL_GetError() );
return false;
}
uint32_t const* data = static_cast<uint32_t const*>( rawData );
uint32_t const* data = static_cast<uint32_t const*>( raw_data );
// Create Shader Module
VkShaderModuleCreateInfo const shaderModuleCreateInfo = {
VkShaderModuleCreateInfo const shader_module_create_info = {
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.codeSize = dataSize,
.codeSize = data_size,
.pCode = data,
};
VkShaderModule shaderModule;
VK_CHECK( vkCreateShaderModule( device, &shaderModuleCreateInfo, nullptr, &shaderModule ) );
VkShaderModule shader_module;
VK_CHECK( vkCreateShaderModule( device, &shader_module_create_info, nullptr, &shader_module ) );
VkDescriptorSetLayoutBinding constexpr perFrameDescriptorBinding{
VkDescriptorSetLayoutBinding constexpr per_frame_descriptor_binding{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.descriptorCount = 1,
@ -47,116 +50,86 @@ bool MiscData::init( RenderDevice const& renderDevice )
.pImmutableSamplers = nullptr,
};
VkDescriptorSetLayoutCreateInfo perFrameDescriptorSetLayoutCreateInfo = {
VkDescriptorSetLayoutCreateInfo per_frame_descriptor_set_layout_create_info{
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.bindingCount = 1,
.pBindings = &perFrameDescriptorBinding,
.pBindings = &per_frame_descriptor_binding,
};
VK_CHECK(
vkCreateDescriptorSetLayout( device, &perFrameDescriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout ) );
VK_CHECK( vkCreateDescriptorSetLayout(
device, &per_frame_descriptor_set_layout_create_info, nullptr, &descriptorSetLayout ) );
VkPushConstantRange const pushConstantRange = {
VkPushConstantRange constexpr push_constant_range{
.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS,
.offset = 0,
.size = 2 * sizeof( DirectX::XMMATRIX ) + Material::GPU_DATA_SIZE,
.size = 2 * sizeof( DirectX::XMMATRIX ) + Material::kGPUDataSize,
};
std::array const descriptorSetLayouts = {
renderDevice.textureManager->descriptorLayout(),
VkDescriptorSetLayout const descriptor_set_layouts[] = {
render_device.textureManager->DescriptorLayout(),
descriptorSetLayout,
};
VkPipelineLayoutCreateInfo const pipelineLayoutCreateInfo = {
VkPipelineLayoutCreateInfo const pipeline_layout_create_info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.setLayoutCount = static_cast<uint32_t>( descriptorSetLayouts.size() ),
.pSetLayouts = descriptorSetLayouts.data(),
.setLayoutCount = _countof( descriptor_set_layouts ),
.pSetLayouts = descriptor_set_layouts,
.pushConstantRangeCount = 1,
.pPushConstantRanges = &pushConstantRange,
.pPushConstantRanges = &push_constant_range,
};
VK_CHECK( vkCreatePipelineLayout( device, &pipelineLayoutCreateInfo, nullptr, &pipelineLayout ) );
VK_CHECK( vkCreatePipelineLayout( device, &pipeline_layout_create_info, nullptr, &pipelineLayout ) );
std::array stages = {
VkPipelineShaderStageCreateInfo{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_VERTEX_BIT,
.module = shaderModule,
.pName = "VertexMain",
.pSpecializationInfo = nullptr,
},
VkPipelineShaderStageCreateInfo{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
.module = shaderModule,
.pName = "FragmentMain",
.pSpecializationInfo = nullptr,
}
VkPipelineShaderStageCreateInfo stages[] = {
{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_VERTEX_BIT,
.module = shader_module,
.pName = "VertexMain",
.pSpecializationInfo = nullptr,
},
{
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
.module = shader_module,
.pName = "FragmentMain",
.pSpecializationInfo = nullptr,
}
};
// Bindings
VkVertexInputBindingDescription constexpr bindingDescription = {
VkVertexInputBindingDescription constexpr binding_description = {
.binding = 0,
.stride = sizeof( Vertex ),
.inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
};
std::array attributeDescriptions = {
VkVertexInputAttributeDescription{
.location = 0,
.binding = 0,
.format = VK_FORMAT_R32G32B32_SFLOAT,
.offset = offsetof( Vertex, position ),
},
VkVertexInputAttributeDescription{
.location = 1,
.binding = 0,
.format = VK_FORMAT_R32G32B32_SFLOAT,
.offset = offsetof( Vertex, normal ),
},
VkVertexInputAttributeDescription{
.location = 2,
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = offsetof( Vertex, tangent ),
},
VkVertexInputAttributeDescription{
.location = 3,
.binding = 0,
.format = VK_FORMAT_R32G32_SFLOAT,
.offset = offsetof( Vertex, texCoord0 ),
},
VkVertexInputAttributeDescription{
.location = 4,
.binding = 0,
.format = VK_FORMAT_R32G32_SFLOAT,
.offset = offsetof( Vertex, texCoord1 ),
},
VkVertexInputAttributeDescription{
.location = 5,
.binding = 0,
.format = VK_FORMAT_R32G32B32A32_SFLOAT,
.offset = offsetof( Vertex, color0 ),
},
VkVertexInputAttributeDescription attribute_descriptions[] = {
{.location = 0, .binding = 0, .format = VK_FORMAT_R32G32B32_SFLOAT, .offset = offsetof( Vertex, position ) },
{.location = 1, .binding = 0, .format = VK_FORMAT_R32G32B32_SFLOAT, .offset = offsetof( Vertex, normal ) },
{.location = 2, .binding = 0, .format = VK_FORMAT_R32G32B32A32_SFLOAT, .offset = offsetof( Vertex, tangent ) },
{.location = 3, .binding = 0, .format = VK_FORMAT_R32G32_SFLOAT, .offset = offsetof( Vertex, texCoord0 )},
{.location = 4, .binding = 0, .format = VK_FORMAT_R32G32_SFLOAT, .offset = offsetof( Vertex, texCoord1 )},
{.location = 5, .binding = 0, .format = VK_FORMAT_R32G32B32A32_SFLOAT, .offset = offsetof( Vertex, color0 ) },
};
VkPipelineVertexInputStateCreateInfo const vertexInputState = {
VkPipelineVertexInputStateCreateInfo const vertex_input_state = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.vertexBindingDescriptionCount = 1,
.pVertexBindingDescriptions = &bindingDescription,
.vertexAttributeDescriptionCount = static_cast<uint32_t>( attributeDescriptions.size() ),
.pVertexAttributeDescriptions = attributeDescriptions.data(),
.pVertexBindingDescriptions = &binding_description,
.vertexAttributeDescriptionCount = _countof( attribute_descriptions ),
.pVertexAttributeDescriptions = attribute_descriptions,
};
VkPipelineInputAssemblyStateCreateInfo constexpr inputAssembly = {
VkPipelineInputAssemblyStateCreateInfo const input_assembly = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -164,14 +137,14 @@ bool MiscData::init( RenderDevice const& renderDevice )
.primitiveRestartEnable = VK_FALSE,
};
VkPipelineTessellationStateCreateInfo constexpr tessellationState = {
VkPipelineTessellationStateCreateInfo constexpr tessellation_state = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.patchControlPoints = 0,
};
VkPipelineViewportStateCreateInfo constexpr viewportState = {
VkPipelineViewportStateCreateInfo constexpr viewport_state = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -181,7 +154,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.pScissors = nullptr,
};
VkPipelineRasterizationStateCreateInfo constexpr rasterizationState = {
VkPipelineRasterizationStateCreateInfo constexpr rasterization_state = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -197,7 +170,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.lineWidth = 1.0f,
};
VkPipelineMultisampleStateCreateInfo constexpr multisampleState = {
VkPipelineMultisampleStateCreateInfo constexpr multisample_state = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -209,7 +182,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.alphaToOneEnable = VK_FALSE,
};
VkPipelineDepthStencilStateCreateInfo constexpr depthStencilState = {
VkPipelineDepthStencilStateCreateInfo constexpr depth_stencil_state = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -224,7 +197,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.maxDepthBounds = 1.0f,
};
VkPipelineColorBlendAttachmentState constexpr colorBlendAttachmentState = {
VkPipelineColorBlendAttachmentState constexpr color_blend_attachment_state = {
.blendEnable = VK_FALSE,
.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA,
.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
@ -233,52 +206,52 @@ bool MiscData::init( RenderDevice const& renderDevice )
.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO,
.alphaBlendOp = VK_BLEND_OP_ADD,
.colorWriteMask =
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
};
VkPipelineColorBlendStateCreateInfo const colorBlendState = {
VkPipelineColorBlendStateCreateInfo const color_blend_state = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.logicOpEnable = VK_FALSE,
.logicOp = VK_LOGIC_OP_COPY,
.attachmentCount = 1,
.pAttachments = &colorBlendAttachmentState,
.pAttachments = &color_blend_attachment_state,
.blendConstants = { 0.0f, 0.0f, 0.0f, 0.0f },
};
std::array constexpr dynamicStates = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
VkDynamicState constexpr dynamic_states[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
VkPipelineDynamicStateCreateInfo const dynamicStateCreateInfo = {
VkPipelineDynamicStateCreateInfo const dynamic_state_create_info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.dynamicStateCount = static_cast<uint32_t>( dynamicStates.size() ),
.pDynamicStates = dynamicStates.data()
.dynamicStateCount = _countof( dynamic_states ),
.pDynamicStates = dynamic_states,
};
VkPipelineRenderingCreateInfoKHR const renderingCreateInfo = {
VkPipelineRenderingCreateInfoKHR const rendering_create_info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO_KHR,
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &renderDevice.swapchainFormat,
.pColorAttachmentFormats = &render_device.swapchainFormat,
.depthAttachmentFormat = VK_FORMAT_D32_SFLOAT,
};
VkGraphicsPipelineCreateInfo const graphicsPipelineCreateInfo = {
VkGraphicsPipelineCreateInfo const graphics_pipeline_create_info = {
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.pNext = &renderingCreateInfo,
.pNext = &rendering_create_info,
.flags = 0,
.stageCount = static_cast<uint32_t>( stages.size() ),
.pStages = stages.data(),
.pVertexInputState = &vertexInputState,
.pInputAssemblyState = &inputAssembly,
.pTessellationState = &tessellationState,
.pViewportState = &viewportState,
.pRasterizationState = &rasterizationState,
.pMultisampleState = &multisampleState,
.pDepthStencilState = &depthStencilState,
.pColorBlendState = &colorBlendState,
.pDynamicState = &dynamicStateCreateInfo,
.stageCount = _countof( stages ),
.pStages = stages,
.pVertexInputState = &vertex_input_state,
.pInputAssemblyState = &input_assembly,
.pTessellationState = &tessellation_state,
.pViewportState = &viewport_state,
.pRasterizationState = &rasterization_state,
.pMultisampleState = &multisample_state,
.pDepthStencilState = &depth_stencil_state,
.pColorBlendState = &color_blend_state,
.pDynamicState = &dynamic_state_create_info,
.layout = pipelineLayout,
.renderPass = nullptr,
.subpass = 0,
@ -286,11 +259,11 @@ bool MiscData::init( RenderDevice const& renderDevice )
.basePipelineIndex = 0,
};
VK_CHECK( vkCreateGraphicsPipelines( device, nullptr, 1, &graphicsPipelineCreateInfo, nullptr, &meshPipeline ) );
VK_CHECK( vkCreateGraphicsPipelines( device, nullptr, 1, &graphics_pipeline_create_info, nullptr, &meshPipeline ) );
vkDestroyShaderModule( device, shaderModule, nullptr );
vkDestroyShaderModule( device, shader_module, nullptr );
SDL_free( rawData );
SDL_free( raw_data );
}
// Camera
@ -300,7 +273,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
cameraUp = DirectX::XMVectorSet( 0.0f, 1.0f, 0.0f, 1.0f );
cameraData.viewMatrix = DirectX::XMMatrixLookAtLH( cameraData.cameraPosition, cameraTarget, cameraUp );
cameraData.projectionMatrix =
DirectX::XMMatrixPerspectiveFovLH( DirectX::XMConvertToRadians( 70.0f ), 16.0f / 9.0f, 0.1f, 1000.0f );
DirectX::XMMatrixPerspectiveFovLH( DirectX::XMConvertToRadians( 70.0f ), 16.0f / 9.0f, 0.1f, 1000.0f );
cameraUniformBufferSize = sizeof( CameraData ) + sizeof( LightData );
}
@ -308,18 +281,14 @@ bool MiscData::init( RenderDevice const& renderDevice )
// Lights
{
auto pointLightsValue = renderDevice.bufferManager->createStorageBuffer( 10 * sizeof( PointLight ) );
if ( !pointLightsValue ) return false;
pointLights = render_device.bufferManager->CreateStorageBuffer( 10 * sizeof( PointLight ) );
if ( not pointLights ) return false;
pointLights = std::move( pointLightsValue.value() );
directionalLights = render_device.bufferManager->CreateStorageBuffer( 10 * sizeof( DirectionalLight ) );
if ( not directionalLights ) return false;
auto dirLightsValue = renderDevice.bufferManager->createStorageBuffer( 10 * sizeof( DirectionalLight ) );
if ( !dirLightsValue ) return false;
directionalLights = std::move( dirLightsValue.value() );
lightData.pointLights = renderDevice.bufferManager->fetchDeviceAddress( pointLights ).value();
lightData.directionalLights = renderDevice.bufferManager->fetchDeviceAddress( directionalLights ).value();
lightData.pointLights = render_device.bufferManager->FetchDeviceAddress( pointLights ).value();
lightData.directionalLights = render_device.bufferManager->FetchDeviceAddress( directionalLights ).value();
lightData.dirLightCount = 0;
lightData.pointLightCount = 0;
}
@ -327,7 +296,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
// Uniform Buffer
{
VkBufferCreateInfo const bufferCreateInfo = {
VkBufferCreateInfo const buffer_create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
@ -338,7 +307,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
VmaAllocationCreateInfo constexpr allocation_create_info = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
@ -349,19 +318,19 @@ bool MiscData::init( RenderDevice const& renderDevice )
.priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VmaAllocationInfo allocation_info;
VK_CHECK( vmaCreateBuffer(
renderDevice.gpuAllocator,
&bufferCreateInfo,
&allocationCreateInfo,
&cameraUniformBuffer,
&cameraUniformBufferAllocation,
&allocationInfo ) );
render_device.gpuAllocator,
&buffer_create_info,
&allocation_create_info,
&cameraUniformBuffer,
&cameraUniformBufferAllocation,
&allocation_info ) );
if ( allocationInfo.pMappedData )
if ( allocation_info.pMappedData )
{
cameraUniformBufferPtr = static_cast<uint8_t*>( allocationInfo.pMappedData );
cameraUniformBufferPtr = static_cast<uint8_t*>( allocation_info.pMappedData );
memcpy( cameraUniformBufferPtr, &cameraData, sizeof cameraData );
memcpy( cameraUniformBufferPtr + sizeof cameraData, &lightData, sizeof lightData );
}
@ -369,28 +338,22 @@ bool MiscData::init( RenderDevice const& renderDevice )
// Descriptors
{
std::array poolSizes = {
VkDescriptorPoolSize{
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.descriptorCount = 3,
},
VkDescriptorPoolSize{
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = 100,
},
VkDescriptorPoolSize pool_sizes[] = {
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 3 },
{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 100},
};
VkDescriptorPoolCreateInfo const descriptorPoolCreateInfo = {
VkDescriptorPoolCreateInfo const descriptor_pool_create_info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.maxSets = 101,
.poolSizeCount = static_cast<uint32_t>( poolSizes.size() ),
.pPoolSizes = poolSizes.data(),
.poolSizeCount = _countof( pool_sizes ),
.pPoolSizes = pool_sizes,
};
VK_CHECK( vkCreateDescriptorPool( device, &descriptorPoolCreateInfo, nullptr, &descriptorPool ) );
VK_CHECK( vkCreateDescriptorPool( device, &descriptor_pool_create_info, nullptr, &descriptorPool ) );
VkDescriptorSetAllocateInfo const descriptorSetAllocateInfo = {
VkDescriptorSetAllocateInfo const descriptor_set_allocate_info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
.descriptorPool = descriptorPool,
@ -398,36 +361,35 @@ bool MiscData::init( RenderDevice const& renderDevice )
.pSetLayouts = &descriptorSetLayout,
};
VK_CHECK( vkAllocateDescriptorSets( device, &descriptorSetAllocateInfo, &descriptorSet ) );
VK_CHECK( vkAllocateDescriptorSets( device, &descriptor_set_allocate_info, &descriptorSet ) );
VkDescriptorBufferInfo const descriptorBufferInfo = {
VkDescriptorBufferInfo const descriptor_buffer_info = {
.buffer = cameraUniformBuffer,
.offset = 0,
.range = cameraUniformBufferSize,
};
std::array writeDescriptorSets = {
VkWriteDescriptorSet{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = descriptorSet,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.pImageInfo = nullptr,
.pBufferInfo = &descriptorBufferInfo,
.pTexelBufferView = nullptr,
},
VkWriteDescriptorSet write_descriptor_sets[] = {
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = descriptorSet,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.pImageInfo = nullptr,
.pBufferInfo = &descriptor_buffer_info,
.pTexelBufferView = nullptr,
},
};
vkUpdateDescriptorSets(
device, static_cast<uint32_t>( writeDescriptorSets.size() ), writeDescriptorSets.data(), 0, nullptr );
vkUpdateDescriptorSets( device, _countof( write_descriptor_sets ), write_descriptor_sets, 0, nullptr );
}
// Barrier Creation
{
VkImageSubresourceRange subresourceRange = {
VkImageSubresourceRange subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
@ -446,7 +408,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.subresourceRange = subresourceRange,
.subresourceRange = subresource_range,
};
acquireToRenderDependency = {
@ -472,7 +434,7 @@ bool MiscData::init( RenderDevice const& renderDevice )
.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.subresourceRange = subresourceRange,
.subresourceRange = subresource_range,
};
renderToPresentDependency = {
@ -497,17 +459,18 @@ bool MiscData::init( RenderDevice const& renderDevice )
return true;
}
void MiscData::destroy( RenderDevice const& renderDevice )
void MiscData::Destroy( Blaze::RenderDevice const& render_device )
{
VkDevice const device = renderDevice.device;
VkDevice const device = render_device.device;
vkDestroyDescriptorPool( device, Take( descriptorPool ), nullptr );
vmaDestroyBuffer( renderDevice.gpuAllocator, Take( cameraUniformBuffer ), Take( cameraUniformBufferAllocation ) );
vmaDestroyBuffer( render_device.gpuAllocator, Take( cameraUniformBuffer ), Take( cameraUniformBufferAllocation ) );
renderDevice.bufferManager->freeBuffer( std::move( pointLights ) );
renderDevice.bufferManager->freeBuffer( std::move( directionalLights ) );
render_device.bufferManager->FreeBuffer( &pointLights );
render_device.bufferManager->FreeBuffer( &directionalLights );
vkDestroyPipeline( device, Take( meshPipeline ), nullptr );
vkDestroyPipelineLayout( device, Take( pipelineLayout ), nullptr );
vkDestroyDescriptorSetLayout( device, Take( descriptorSetLayout ), nullptr );
}
} // namespace Blaze

View File

@ -1,13 +1,13 @@
#pragma once
#include <array>
#include "VulkanHeader.h"
#include <DirectXMath.h>
#include "BufferManager.h"
namespace Blaze
{
struct GlobalMemory;
struct RenderDevice;
@ -31,9 +31,9 @@ struct MiscData
struct DirectionalLight
{
DirectX::XMFLOAT3 direction;
float _padding0;
float padding0;
DirectX::XMFLOAT3 color;
float _padding1;
float padding1;
};
struct LightData
@ -75,6 +75,7 @@ struct MiscData
uint8_t frameTimeWriteHead;
uint8_t frameTimeEntryCount;
bool init( RenderDevice const& renderDevice );
void destroy( RenderDevice const& renderDevice );
bool Init( RenderDevice const& render_device );
void Destroy( RenderDevice const& render_device );
};
} // namespace Blaze

View File

@ -0,0 +1,807 @@
#include "ModelLoader.h"
#include <algorithm>
#include <memory_resource>
#include <string_view>
#include <DirectXMath.h>
#include <SDL3/SDL_log.h>
#include <cgltf.h>
#include <stb_image.h>
#include "EntityManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "MacroUtils.h"
namespace Blaze
{
std::optional<TextureID> LoadTexture(
RenderDevice* render_device, VkSampler sampler, cgltf_image const& image, bool const linear )
{
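// Image bytes either live directly on the buffer view or inside the parent glTF buffer at the view's offset.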
byte* data;
if ( image.buffer_view->data )
{
data = static_cast<byte*>( image.buffer_view->data );
}
else
{
data = static_cast<byte*>( image.buffer_view->buffer->data ) + image.buffer_view->offset;
}
size_t size = image.buffer_view->size;
uint32_t width;
uint32_t height;
uint32_t num_channels = 4;
stbi_uc* texture_data;
{
int w;
int h;
int nc;
int n_req_channels = static_cast<int>( num_channels );
texture_data = stbi_load_from_memory(
reinterpret_cast<stbi_uc const*>( data ), static_cast<int>( size ), &w, &h, &nc, n_req_channels );
ASSERT( nc <= n_req_channels );
if ( not texture_data )
{
return std::nullopt;
}
width = static_cast<uint32_t>( w );
height = static_cast<uint32_t>( h );
}
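// `linear` selects a UNORM format (no sRGB decode) for data textures and an sRGB format for colour textures.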
TextureID texture = render_device->textureManager->CreateTexture(
{ width, height, 1 }, sampler, linear ? VK_FORMAT_R8G8B8A8_UNORM : VK_FORMAT_R8G8B8A8_SRGB );
if ( not texture )
{
return std::nullopt;
}
VkImage texture_image = render_device->textureManager->FetchImage( texture ).value();
// Staging Buffer Create
VkBuffer staging_buffer;
VmaAllocation staging_allocation;
{
VkBufferCreateInfo const staging_buffer_create_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.size = static_cast<VkDeviceSize>( width ) * height * num_channels * sizeof( texture_data[0] ),
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr staging_allocation_create_info = {
.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VmaAllocationInfo allocation_info;
VK_CHECK( vmaCreateBuffer(
render_device->gpuAllocator,
&staging_buffer_create_info,
&staging_allocation_create_info,
&staging_buffer,
&staging_allocation,
&allocation_info ) );
if ( allocation_info.pMappedData )
{
memcpy( allocation_info.pMappedData, texture_data, staging_buffer_create_info.size );
}
}
// All data is copied to stagingBuffer, don't need this.
stbi_image_free( texture_data );
// Staging -> Texture transfer
{
Frame& frame_in_use = render_device->frames[render_device->frameIndex];
// This should just pass.
VK_CHECK( vkWaitForFences( render_device->device, 1, &frame_in_use.frameReadyToReuse, VK_TRUE, INT64_MAX ) );
// Reset Frame
VK_CHECK( vkResetFences( render_device->device, 1, &frame_in_use.frameReadyToReuse ) );
VK_CHECK( vkResetCommandPool( render_device->device, frame_in_use.commandPool, 0 ) );
VkCommandBufferBeginInfo constexpr begin_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = nullptr,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
.pInheritanceInfo = nullptr,
};
uint32_t mip_levels = TextureManager::CalculateRequiredMipLevels( width, height, 1 );
VkImageSubresourceRange const subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mip_levels,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageMemoryBarrier2 const creation_to_transfer_image_barrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT,
.srcAccessMask = VK_ACCESS_2_NONE,
.dstStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = render_device->textureManager->FetchImage( texture ).value(),
.subresourceRange = subresource_range,
};
VkDependencyInfo const creation_to_transfer_dependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &creation_to_transfer_image_barrier,
};
VkImageSubresourceRange all_but_last_mip_subresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mip_levels - 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageSubresourceRange last_mip_subresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = mip_levels - 1,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageMemoryBarrier2 transfer_to_ready_image_barriers[] = {
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = all_but_last_mip_subresource,
},
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
.dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = last_mip_subresource,
}
};
VkDependencyInfo const transfer_to_ready_dependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = _countof( transfer_to_ready_image_barriers ),
.pImageMemoryBarriers = transfer_to_ready_image_barriers,
};
constexpr VkImageSubresourceRange mip_level_subresource = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageMemoryBarrier2 prepare_next_mip_level_barriers[] = {
// prepareNextMipLevelSrcImageBarrier
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT,
.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = mip_level_subresource,
},
// prepareNextMipLevelDstImageBarrier
{
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
.pNext = nullptr,
.srcStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.dstStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT,
.dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.newLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture_image,
.subresourceRange = mip_level_subresource,
},
};
VkDependencyInfo const prepare_next_mip_level_dependency = {
.sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
.pNext = nullptr,
.dependencyFlags = 0,
.memoryBarrierCount = 0,
.pMemoryBarriers = nullptr,
.bufferMemoryBarrierCount = 0,
.pBufferMemoryBarriers = nullptr,
.imageMemoryBarrierCount = _countof( prepare_next_mip_level_barriers ),
.pImageMemoryBarriers = prepare_next_mip_level_barriers,
};
vkBeginCommandBuffer( frame_in_use.commandBuffer, &begin_info );
{
VkImageSubresourceLayers image_subresource_layers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
// TODO: Ensure `bufferRowLength` and `bufferImageHeight` are not required.
VkBufferImageCopy copy_region = {
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = image_subresource_layers,
.imageOffset = {0, 0, 0},
.imageExtent = {width, height, 1}
};
// Start
vkCmdPipelineBarrier2( frame_in_use.commandBuffer, &creation_to_transfer_dependency );
// Staging -> Image L0
vkCmdCopyBufferToImage(
frame_in_use.commandBuffer,
staging_buffer,
texture_image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1,
&copy_region );
prepare_next_mip_level_barriers[0].subresourceRange.baseMipLevel = 0;
prepare_next_mip_level_barriers[1].subresourceRange.baseMipLevel = 1;
int32_t mip_src_width = static_cast<int32_t>( width );
int32_t mip_src_height = static_cast<int32_t>( height );
int32_t mip_dst_width = std::max( mip_src_width / 2, 1 );
int32_t mip_dst_height = std::max( mip_src_height / 2, 1 );
VkImageSubresourceLayers constexpr mip_subresource_layers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageBlit2 image_blit = {
.sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
.pNext = nullptr,
.srcSubresource = mip_subresource_layers,
.srcOffsets = {{ 0, 0, 0 }, { mip_src_width, mip_src_height, 1 }},
.dstSubresource = mip_subresource_layers,
.dstOffsets = {{ 0, 0, 0 }, { mip_dst_width, mip_dst_height, 1 }},
};
image_blit.srcSubresource.mipLevel = 0;
image_blit.dstSubresource.mipLevel = 1;
image_blit.srcOffsets[1].x = mip_src_width;
image_blit.srcOffsets[1].y = mip_src_height;
image_blit.dstOffsets[1].x = mip_dst_width;
image_blit.dstOffsets[1].y = mip_dst_height;
VkBlitImageInfo2 blit_info = {
.sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
.pNext = nullptr,
.srcImage = texture_image,
.srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
.dstImage = texture_image,
.dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
.regionCount = 1,
.pRegions = &image_blit,
.filter = VK_FILTER_LINEAR,
};
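// Mip chain strategy: each iteration below transitions level (dst_mip_level - 1) to
// TRANSFER_SRC_OPTIMAL and blits it down into level dst_mip_level; the state prepared on the final
// iteration targets mip_levels and is never consumed because the loop exits first.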
// MipMapping
for ( uint32_t dst_mip_level = 1; dst_mip_level < mip_levels; ++dst_mip_level )
{
vkCmdPipelineBarrier2( frame_in_use.commandBuffer, &prepare_next_mip_level_dependency );
vkCmdBlitImage2( frame_in_use.commandBuffer, &blit_info );
// Prep for NEXT iteration
mip_src_width = mip_dst_width;
mip_src_height = mip_dst_height;
mip_dst_width = std::max( mip_src_width / 2, 1 );
mip_dst_height = std::max( mip_src_height / 2, 1 );
image_blit.srcSubresource.mipLevel = dst_mip_level;
image_blit.dstSubresource.mipLevel = dst_mip_level + 1;
image_blit.srcOffsets[1].x = mip_src_width;
image_blit.srcOffsets[1].y = mip_src_height;
image_blit.dstOffsets[1].x = mip_dst_width;
image_blit.dstOffsets[1].y = mip_dst_height;
// Prepare the next iteration's barriers: dst_mip_level becomes the blit source, dst_mip_level + 1 the destination.
prepare_next_mip_level_barriers[0].subresourceRange.baseMipLevel = dst_mip_level;
prepare_next_mip_level_barriers[1].subresourceRange.baseMipLevel = dst_mip_level + 1;
}
// End
vkCmdPipelineBarrier2( frame_in_use.commandBuffer, &transfer_to_ready_dependency );
}
vkEndCommandBuffer( frame_in_use.commandBuffer );
VkSubmitInfo submit_info = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = nullptr,
.waitSemaphoreCount = 0,
.pWaitSemaphores = nullptr,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &frame_in_use.commandBuffer,
.signalSemaphoreCount = 0,
.pSignalSemaphores = nullptr,
};
VK_CHECK( vkQueueSubmit( render_device->directQueue, 1, &submit_info, frame_in_use.frameReadyToReuse ) );
// Do not reset this fence here; otherwise the frame would never become available to the main loop again.
VK_CHECK( vkWaitForFences( render_device->device, 1, &frame_in_use.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
render_device->frameIndex = ( render_device->frameIndex + 1 ) % render_device->GetNumFrames();
}
vmaDestroyBuffer( render_device->gpuAllocator, staging_buffer, staging_allocation );
return texture;
}
// TODO: Cache materials while loading.
uint32_t ProcessMaterial( RenderDevice* render_device, Model* model, cgltf_material const& material )
{
ASSERT( material.has_pbr_metallic_roughness );
auto const base_color_factor = DirectX::XMFLOAT4{ material.pbr_metallic_roughness.base_color_factor };
auto const emissive_factor = DirectX::XMFLOAT4{
material.emissive_factor[0],
material.emissive_factor[1],
material.emissive_factor[2],
std::max( material.emissive_strength.emissive_strength, 1.0f ),
};
VkSampler sampler = nullptr;
TextureID base_color_texture;
TextureID normal_texture;
TextureID metal_rough_texture;
TextureID emissive_texture;
VkSamplerCreateInfo constexpr sampler_create_info = {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.magFilter = VK_FILTER_LINEAR,
.minFilter = VK_FILTER_LINEAR,
.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
.mipLodBias = 0.0,
.anisotropyEnable = true,
.maxAnisotropy = 1.0f,
.compareEnable = false,
.compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = VK_LOD_CLAMP_NONE,
.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
.unnormalizedCoordinates = false,
};
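// Note: anisotropyEnable is set, but maxAnisotropy of 1.0 effectively disables anisotropic
// filtering; raising it toward the device limit would be needed for it to take effect.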
VK_CHECK( vkCreateSampler( render_device->device, &sampler_create_info, nullptr, &sampler ) );
if ( material.pbr_metallic_roughness.base_color_texture.texture )
{
cgltf_image const* base_color_image = material.pbr_metallic_roughness.base_color_texture.texture->image;
auto const base_color_texture_opt = LoadTexture( render_device, sampler, *base_color_image, false );
if ( not base_color_texture_opt )
{
vkDestroySampler( render_device->device, Take( sampler ), nullptr );
return UINT32_MAX;
}
base_color_texture = base_color_texture_opt.value();
}
if ( material.pbr_metallic_roughness.metallic_roughness_texture.texture )
{
cgltf_image const* metal_rough_image = material.pbr_metallic_roughness.metallic_roughness_texture.texture->image;
auto const metal_rough_texture_opt = LoadTexture( render_device, sampler, *metal_rough_image, true );
if ( not metal_rough_texture_opt )
{
vkDestroySampler( render_device->device, Take( sampler ), nullptr );
render_device->textureManager->FreeTexture( &base_color_texture );
return UINT32_MAX;
}
metal_rough_texture = metal_rough_texture_opt.value();
}
if ( material.normal_texture.texture )
{
cgltf_image const* normal_image = material.normal_texture.texture->image;
auto const normal_texture_opt = LoadTexture( render_device, sampler, *normal_image, true );
if ( not normal_texture_opt )
{
vkDestroySampler( render_device->device, Take( sampler ), nullptr );
render_device->textureManager->FreeTexture( &metal_rough_texture );
render_device->textureManager->FreeTexture( &base_color_texture );
return UINT32_MAX;
}
normal_texture = normal_texture_opt.value();
}
if ( material.emissive_texture.texture )
{
cgltf_image const* emissive_image = material.emissive_texture.texture->image;
auto const emissive_texture_opt = LoadTexture( render_device, sampler, *emissive_image, true );
if ( not emissive_texture_opt )
{
vkDestroySampler( render_device->device, Take( sampler ), nullptr );
render_device->textureManager->FreeTexture( &base_color_texture );
render_device->textureManager->FreeTexture( &normal_texture );
render_device->textureManager->FreeTexture( &metal_rough_texture );
return UINT32_MAX;
}
emissive_texture = emissive_texture_opt.value();
}
float const metallic = material.pbr_metallic_roughness.metallic_factor;
float const roughness = material.pbr_metallic_roughness.roughness_factor;
uint32_t const material_idx = static_cast<uint32_t>( model->materials.size() );
model->materials.push_back( {
sampler,
base_color_factor,
emissive_factor,
base_color_texture,
normal_texture,
metal_rough_texture,
emissive_texture,
roughness,
metallic,
} );
return material_idx;
}
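// LoadAttribute interleaves a tightly packed glTF accessor into the Vertex layout.
// Example: for a vec3 NORMAL accessor, float_count / 3 vertices are appended and 12 bytes are
// copied per vertex at offsetof(Vertex, normal), stepping by sizeof(Vertex).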
void LoadAttribute(
std::vector<Vertex>* vertices,
int32_t const vertex_start,
std::vector<float>* scratch,
cgltf_attribute const& position_attr,
size_t const stride,
size_t const offset,
size_t const components )
{
size_t const float_count = cgltf_accessor_unpack_floats( position_attr.data, nullptr, 0 );
ASSERT( float_count % components == 0 );
scratch->resize( float_count );
cgltf_accessor_unpack_floats( position_attr.data, scratch->data(), scratch->size() );
// Guaranteed to have space for these vertices.
vertices->resize( vertex_start + float_count / components );
byte* write_ptr = reinterpret_cast<byte*>( vertices->data() + vertex_start ) + offset;
float const* read_ptr = scratch->data();
for ( size_t i = vertex_start; i < vertices->size(); ++i )
{
memcpy( write_ptr, read_ptr, components * sizeof( float ) );
read_ptr += components;
write_ptr += stride;
}
scratch->clear();
}
ModelMesh ProcessMesh(
RenderDevice* render_device,
Model* model,
std::vector<Vertex>* vertices,
std::vector<uint32_t>* indices,
cgltf_mesh const& mesh )
{
using namespace std::string_view_literals;
uint32_t const primitive_start = static_cast<uint32_t>( model->primitives.size() );
uint32_t const primitive_count = static_cast<uint32_t>( mesh.primitives_count );
cgltf_primitive const* primitives = mesh.primitives;
for ( uint32_t primitive_index = 0; primitive_index < mesh.primitives_count; ++primitive_index )
{
// VertexStart is per-primitive
int32_t const vertex_start = static_cast<int32_t>( vertices->size() );
cgltf_primitive const& primitive = primitives[primitive_index];
ASSERT( primitive.type == cgltf_primitive_type_triangles );
// Index Buffer
size_t const index_start = indices->size();
size_t const index_count = cgltf_accessor_unpack_indices( primitive.indices, nullptr, sizeof indices->at( 0 ), 0 );
ASSERT( index_count > 0 );
indices->resize( index_start + index_count );
cgltf_accessor_unpack_indices(
primitive.indices, indices->data() + index_start, sizeof indices->at( 0 ), index_count );
// Material
uint32_t material_idx = UINT32_MAX;
if ( primitive.material )
{
material_idx = ProcessMaterial( render_device, model, *primitive.material );
}
model->primitives.push_back( Primitive{
.indexStart = static_cast<uint32_t>( index_start ),
.indexCount = static_cast<uint32_t>( index_count ),
.material = material_idx,
.vertexOffset = vertex_start,
} );
std::vector<float> scratch;
cgltf_attribute const* attributes = primitive.attributes;
for ( uint32_t attrib_index = 0; attrib_index < primitive.attributes_count; ++attrib_index )
{
if ( "POSITION"sv == attributes[attrib_index].name )
{
cgltf_attribute const& position_attr = attributes[attrib_index];
ASSERT( position_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( position_attr.data->type == cgltf_type_vec3 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, position );
size_t constexpr components = 3;
LoadAttribute( vertices, vertex_start, &scratch, position_attr, stride, offset, components );
}
if ( "NORMAL"sv == attributes[attrib_index].name )
{
cgltf_attribute const& normal_attr = attributes[attrib_index];
ASSERT( normal_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( normal_attr.data->type == cgltf_type_vec3 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, normal );
size_t constexpr components = 3;
LoadAttribute( vertices, vertex_start, &scratch, normal_attr, stride, offset, components );
}
if ( "TANGENT"sv == attributes[attrib_index].name )
{
cgltf_attribute const& tangent_attr = attributes[attrib_index];
ASSERT( tangent_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( tangent_attr.data->type == cgltf_type_vec4 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, tangent );
size_t constexpr components = 4;
LoadAttribute( vertices, vertex_start, &scratch, tangent_attr, stride, offset, components );
}
if ( "TEXCOORD_0"sv == attributes[attrib_index].name )
{
cgltf_attribute const& tex_coord_attr = attributes[attrib_index];
ASSERT( tex_coord_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( tex_coord_attr.data->type == cgltf_type_vec2 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, texCoord0 );
size_t constexpr components = 2;
LoadAttribute( vertices, vertex_start, &scratch, tex_coord_attr, stride, offset, components );
}
if ( "TEXCOORD_1"sv == attributes[attrib_index].name )
{
cgltf_attribute const& tex_coord_attr = attributes[attrib_index];
ASSERT( tex_coord_attr.data->component_type == cgltf_component_type_r_32f );
ASSERT( tex_coord_attr.data->type == cgltf_type_vec2 );
size_t constexpr stride = sizeof( Vertex );
size_t constexpr offset = offsetof( Vertex, texCoord1 );
size_t constexpr components = 2;
LoadAttribute( vertices, vertex_start, &scratch, tex_coord_attr, stride, offset, components );
}
if ( "COLOR_0"sv == attributes[attrib_index].name )
{
cgltf_attribute const& color_attr = attributes[attrib_index];
ASSERT( color_attr.data->component_type == cgltf_component_type_r_32f );
size_t constexpr stride = sizeof( Vertex );
// TODO: This reuses the texCoord1 offset; COLOR_0 likely needs its own Vertex member (looks like a copy-paste slip).
size_t constexpr offset = offsetof( Vertex, texCoord1 );
size_t components = 3;
switch ( color_attr.data->type )
{
case cgltf_type_vec3:
components = 3;
break;
case cgltf_type_vec4:
components = 4;
break;
default:
UNREACHABLE;
}
LoadAttribute( vertices, vertex_start, &scratch, color_attr, stride, offset, components );
}
// TODO: Grab other attributes.
}
}
return { primitive_start, primitive_count };
}
Entity* ProcessNode(
RenderDevice* render_device,
EntityManager* entity_manager,
Model* model,
std::vector<Vertex>* vertices,
std::vector<uint32_t>* indices,
cgltf_node const& node )
{
DirectX::XMVECTOR translation;
DirectX::XMVECTOR rotation;
DirectX::XMVECTOR scale;
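// glTF nodes carry either a single 4x4 matrix or separate translation/rotation/scale components;
// missing components default to identity, which the two branches below reproduce.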
if ( node.has_matrix )
{
auto const mat = DirectX::XMMATRIX{ node.matrix };
// Keep the decompose call outside ASSERT so it still runs if the macro compiles out in release builds.
[[maybe_unused]] bool const decomposed = DirectX::XMMatrixDecompose( &scale, &rotation, &translation, mat );
ASSERT( decomposed );
}
else
{
translation = node.has_translation
? DirectX::XMVectorSet( node.translation[0], node.translation[1], node.translation[2], 1.0f )
: DirectX::XMVectorZero();
rotation = node.has_rotation
? DirectX::XMVectorSet( node.rotation[0], node.rotation[1], node.rotation[2], node.rotation[3] )
: DirectX::XMQuaternionIdentity();
scale = node.has_scale ? DirectX::XMVectorSet( node.scale[0], node.scale[1], node.scale[2], 1.0f )
: DirectX::XMVectorSplatOne();
}
Entity* entity = entity_manager->CreateEntity( {
.translation = translation,
.rotation = rotation,
.scale = scale,
} );
if ( node.mesh )
{
entity->modelMesh = ProcessMesh( render_device, model, vertices, indices, *node.mesh );
}
for ( uint32_t child_idx = 0; child_idx < node.children_count; ++child_idx )
{
entity->AddChild(
ProcessNode( render_device, entity_manager, model, vertices, indices, *node.children[child_idx] ) );
}
return entity;
}
Entity* LoadModel( Blaze::RenderDevice* render_device, EntityManager* entity_manager, char const* filename )
{
cgltf_data* gltf_model = nullptr;
cgltf_options options = {};
cgltf_result result = cgltf_parse_file( &options, filename, &gltf_model );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s failed to load", filename );
cgltf_free( gltf_model );
return nullptr;
}
result = cgltf_validate( gltf_model );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s is invalid.", filename );
cgltf_free( gltf_model );
return nullptr;
}
result = cgltf_load_buffers( &options, gltf_model, filename );
if ( result != cgltf_result_success )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "%s buffers failed to load.", filename );
cgltf_free( gltf_model );
return nullptr;
}
Entity* entity = entity_manager->CreateEntity( {
.translation = DirectX::XMVectorZero(),
.rotation = DirectX::XMQuaternionIdentity(),
.scale = DirectX::XMVectorSplatOne(),
} );
// Output data
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
cgltf_scene const* current_scene = gltf_model->scene;
for ( uint32_t node_idx = 0; node_idx < current_scene->nodes_count; ++node_idx )
{
entity->AddChild( ProcessNode(
render_device, entity_manager, &entity->model, &vertices, &indices, *current_scene->nodes[node_idx] ) );
}
entity->model.vertexBuffer = render_device->bufferManager->CreateVertexBuffer( vertices.size() * sizeof vertices[0] );
if ( not entity->model.vertexBuffer )
{
cgltf_free( gltf_model );
return nullptr;
}
render_device->bufferManager->WriteToBuffer( entity->model.vertexBuffer, vertices );
entity->model.indexBuffer = render_device->bufferManager->CreateIndexBuffer( indices.size() * sizeof indices[0] );
if ( not entity->model.indexBuffer )
{
cgltf_free( gltf_model );
return nullptr;
}
render_device->bufferManager->WriteToBuffer( entity->model.indexBuffer, std::span{ indices } );
cgltf_free( gltf_model );
return entity;
}
} // namespace Blaze

View File

@ -7,10 +7,13 @@
#include "BufferManager.h"
#include "TextureManager.h"
namespace Blaze
{
struct GlobalMemory;
struct RenderDevice;
struct EntityManager;
struct Entity;
struct GlobalMemory;
struct Vertex
{
@ -35,7 +38,7 @@ struct ModelMesh
uint32_t primitiveStart = 0;
uint32_t primitiveCount = 0;
[[nodiscard]] bool isNull() const
[[nodiscard]] bool IsNull() const
{
return primitiveCount == 0;
}
@ -43,8 +46,8 @@ struct ModelMesh
struct Material
{
size_t constexpr static GPU_DATA_OFFSET = sizeof( VkSampler );
size_t constexpr static GPU_DATA_SIZE = 56;
size_t constexpr static kGPUDataOffset = sizeof( VkSampler );
size_t constexpr static kGPUDataSize = 56;
VkSampler sampler; // TODO: Reuse
// To copy directly.
@ -57,29 +60,29 @@ struct Material
float roughness = 1.0f;
float metallic = 1.0f;
[[nodiscard]] bool isNull() const
[[nodiscard]] bool IsNull() const
{
return not( albedoTextureID and normalTextureID and metalRoughTextureID and emissiveTextureID and sampler );
}
};
static_assert( sizeof( Material ) == Material::GPU_DATA_OFFSET + Material::GPU_DATA_SIZE );
static_assert( sizeof( Material ) == Material::kGPUDataOffset + Material::kGPUDataSize );
static constexpr Material DEFAULT_MATERIAL = {};
struct Model
{
std::pmr::monotonic_buffer_resource mem;
BufferID vertexBuffer;
BufferID indexBuffer;
std::vector<Material> materials;
std::vector<Primitive> primitives;
BufferID vertexBuffer;
BufferID indexBuffer;
std::pmr::vector<Material> materials;
std::pmr::vector<Primitive> primitives;
[[nodiscard]] bool isNull() const
[[nodiscard]] bool IsNull() const
{
return vertexBuffer.isNull();
return vertexBuffer.IsNull();
}
};
Entity* LoadModel( RenderDevice* renderDevice, EntityManager* entityManager, const char* filename );
Entity* LoadModel( RenderDevice* render_device, EntityManager* entity_manager, const char* filename );
} // namespace Blaze

36
Blaze/Source/RID.h Normal file
View File

@ -0,0 +1,36 @@
#pragma once
#include <cstdint>
namespace Blaze
{
template <typename T>
struct RID
{
private:
uint32_t m_index = 0;
explicit RID( uint32_t const index ) : m_index{ index }
{}
public:
RID() = default;
[[nodiscard]] bool IsNull() const
{
return m_index == 0;
}
static RID null()
{
return {};
}
operator bool() const
{
return m_index != 0;
}
};
} // namespace Blaze
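// Hypothetical usage sketch (not part of this header): only the owning manager can mint non-null
// IDs, since the index constructor is private.
//   TextureID id = render_device->textureManager->CreateTexture( extent, sampler );
//   if ( id ) { /* valid until FreeTexture() bumps the slot's generation */ }
//   TextureID none = TextureID::null(); // IsNull() == true, converts to false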

View File

@ -0,0 +1,528 @@
#include "RenderDevice.h"
#include <algorithm>
#include "MacroUtils.h"
#include <SDL3/SDL_log.h>
#include <array>
#include <optional>
#include <span>
#include "BufferManager.h"
#include "Frame.h"
#include "GlobalMemory.h"
#include "TextureManager.h"
using Blaze::RenderDevice;
#if defined( DTOR_TEST )
RenderDevice::~RenderDevice()
{
ASSERT( !IsInit() );
}
#endif
RenderDevice* RenderDevice::Create( GlobalMemory* mem, CreateInfo const& create_info )
{
ASSERT( mem );
ASSERT( create_info.window );
volkInitialize();
VkInstance instance;
// Create Instance
{
VkApplicationInfo constexpr application_info = {
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
.pNext = nullptr,
.pApplicationName = "Test",
.applicationVersion = VK_MAKE_API_VERSION( 0, 0, 1, 0 ),
.pEngineName = "Blaze",
.engineVersion = VK_MAKE_API_VERSION( 0, 0, 1, 0 ),
.apiVersion = VK_API_VERSION_1_3,
};
uint32_t instance_extension_count;
char const* const* instance_extensions = SDL_Vulkan_GetInstanceExtensions( &instance_extension_count );
VkInstanceCreateInfo const instance_create_info = {
.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.pApplicationInfo = &application_info,
.enabledLayerCount = 0,
.ppEnabledLayerNames = nullptr,
.enabledExtensionCount = instance_extension_count,
.ppEnabledExtensionNames = instance_extensions,
};
VK_CHECK( vkCreateInstance( &instance_create_info, nullptr, &instance ) );
volkLoadInstance( instance );
}
VkSurfaceKHR surface;
// Create Surface
ASSERT( SDL_Vulkan_CreateSurface( create_info.window, instance, nullptr, &surface ) );
VkPhysicalDevice physical_device_in_use = nullptr;
VkDevice device = nullptr;
VmaAllocator allocator = nullptr;
std::optional<uint32_t> direct_queue_family_index = std::nullopt;
VkQueue direct_queue = nullptr;
// Create Device and Queue
{
auto temp_alloc_start = mem->GetState();
uint32_t physical_device_count;
VK_CHECK( vkEnumeratePhysicalDevices( instance, &physical_device_count, nullptr ) );
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "Found %u GPUs", physical_device_count );
VkPhysicalDevice* physical_devices =
reinterpret_cast<VkPhysicalDevice*>( mem->Allocate( sizeof( VkPhysicalDevice ) * physical_device_count ) );
VK_CHECK( vkEnumeratePhysicalDevices( instance, &physical_device_count, physical_devices ) );
for ( VkPhysicalDevice const physical_device : std::span{ physical_devices, physical_device_count } )
{
auto temp_alloc_queue_properties = mem->GetState();
VkPhysicalDeviceProperties properties;
vkGetPhysicalDeviceProperties( physical_device, &properties );
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "GPU: %s", properties.deviceName );
SDL_LogInfo(
SDL_LOG_CATEGORY_GPU,
"- API Version %d.%d.%d",
VK_API_VERSION_MAJOR( properties.apiVersion ),
VK_API_VERSION_MINOR( properties.apiVersion ),
VK_API_VERSION_PATCH( properties.apiVersion ) );
constexpr static uint32_t kApiPatchBits = 0xFFF;
if ( ( properties.apiVersion & ( ~kApiPatchBits ) ) < VK_API_VERSION_1_3 )
{
continue;
}
if ( properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_CPU )
{
continue;
}
uint32_t queue_family_count;
vkGetPhysicalDeviceQueueFamilyProperties( physical_device, &queue_family_count, nullptr );
VkQueueFamilyProperties* queue_family_properties = reinterpret_cast<VkQueueFamilyProperties*>(
mem->Allocate( sizeof( VkQueueFamilyProperties ) * queue_family_count ) );
vkGetPhysicalDeviceQueueFamilyProperties( physical_device, &queue_family_count, queue_family_properties );
for ( uint32_t queue_family_index = 0; queue_family_index != queue_family_count; ++queue_family_index )
{
VkQueueFamilyProperties const& qfp = queue_family_properties[queue_family_index];
bool has_graphics_support = false;
bool has_compute_support = false;
bool has_transfer_support = false;
bool has_present_support = false;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "- Queue [%d]", queue_family_index );
if ( qfp.queueFlags & VK_QUEUE_GRAPHICS_BIT )
{
has_graphics_support = true;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "-- Graphic" );
}
if ( qfp.queueFlags & VK_QUEUE_COMPUTE_BIT )
{
has_compute_support = true;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "-- Compute" );
}
if ( qfp.queueFlags & VK_QUEUE_TRANSFER_BIT )
{
has_transfer_support = true;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "-- Transfer" );
}
VkBool32 is_surface_supported;
VK_CHECK(
vkGetPhysicalDeviceSurfaceSupportKHR( physical_device, queue_family_index, surface, &is_surface_supported ) );
if ( is_surface_supported )
{
has_present_support = true;
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "-- Present" );
}
if ( has_graphics_support and has_compute_support and has_transfer_support and has_present_support )
{
physical_device_in_use = physical_device;
direct_queue_family_index = queue_family_index;
break;
}
}
mem->RestoreState( temp_alloc_queue_properties );
}
ASSERT( physical_device_in_use );
ASSERT( direct_queue_family_index.has_value() );
float priority = 1.0f;
VkDeviceQueueCreateInfo queue_create_info = {
.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.queueFamilyIndex = direct_queue_family_index.value(),
.queueCount = 1,
.pQueuePriorities = &priority,
};
VkPhysicalDeviceVulkan13Features features13 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES,
.pNext = nullptr,
.synchronization2 = true,
.dynamicRendering = true,
};
VkPhysicalDeviceVulkan12Features const features12 = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
.pNext = &features13,
.descriptorIndexing = true,
.shaderSampledImageArrayNonUniformIndexing = true,
.shaderStorageImageArrayNonUniformIndexing = true,
.descriptorBindingUniformBufferUpdateAfterBind = true,
.descriptorBindingSampledImageUpdateAfterBind = true,
.descriptorBindingStorageImageUpdateAfterBind = true,
.descriptorBindingUpdateUnusedWhilePending = true,
.descriptorBindingPartiallyBound = true,
.descriptorBindingVariableDescriptorCount = true,
.runtimeDescriptorArray = true,
.bufferDeviceAddress = true,
};
VkPhysicalDeviceFeatures features = {
.depthClamp = true,
.samplerAnisotropy = true,
};
std::array enabled_device_extensions = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };
VkDeviceCreateInfo const device_create_info = {
.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
.pNext = &features12,
.flags = 0,
.queueCreateInfoCount = 1,
.pQueueCreateInfos = &queue_create_info,
.enabledLayerCount = 0,
.ppEnabledLayerNames = nullptr,
.enabledExtensionCount = static_cast<uint32_t>( enabled_device_extensions.size() ),
.ppEnabledExtensionNames = enabled_device_extensions.data(),
.pEnabledFeatures = &features,
};
VK_CHECK( vkCreateDevice( physical_device_in_use, &device_create_info, nullptr, &device ) );
volkLoadDevice( device );
VmaAllocatorCreateInfo allocator_create_info = {
.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT,
.physicalDevice = physical_device_in_use,
.device = device,
.preferredLargeHeapBlockSize = 0,
.pAllocationCallbacks = nullptr,
.pDeviceMemoryCallbacks = nullptr,
.pHeapSizeLimit = nullptr,
.pVulkanFunctions = nullptr,
.instance = instance,
.vulkanApiVersion = VK_API_VERSION_1_3,
.pTypeExternalMemoryHandleTypes = nullptr,
};
VmaVulkanFunctions vk_functions;
VK_CHECK( vmaImportVulkanFunctionsFromVolk( &allocator_create_info, &vk_functions ) );
allocator_create_info.pVulkanFunctions = &vk_functions;
VK_CHECK( vmaCreateAllocator( &allocator_create_info, &allocator ) );
vkGetDeviceQueue( device, direct_queue_family_index.value(), 0, &direct_queue );
mem->RestoreState( temp_alloc_start );
}
// Swapchain creation
VkExtent2D swapchain_extent = { create_info.width, create_info.height };
VkFormat swapchain_format = VK_FORMAT_UNDEFINED;
VkSwapchainKHR swapchain;
VkImage* swapchain_images;
VkImageView* swapchain_views;
uint32_t swapchain_image_count;
{
auto temp_alloc_start = mem->GetState();
VkSurfaceCapabilitiesKHR capabilities;
VK_CHECK( vkGetPhysicalDeviceSurfaceCapabilitiesKHR( physical_device_in_use, surface, &capabilities ) );
// Image Count Calculation
swapchain_image_count = 3;
if ( capabilities.maxImageCount > 0 )
{
swapchain_image_count = std::min( swapchain_image_count, capabilities.maxImageCount );
}
// NOTE: If minImageCount + 1 exceeds maxImageCount this can request more images than the surface allows.
swapchain_image_count = std::max( swapchain_image_count, capabilities.minImageCount + 1 );
// Image Size calculation
{
auto [minWidth, minHeight] = capabilities.minImageExtent;
auto [maxWidth, maxHeight] = capabilities.maxImageExtent;
swapchain_extent.width = std::clamp( swapchain_extent.width, minWidth, maxWidth );
swapchain_extent.height = std::clamp( swapchain_extent.height, minHeight, maxHeight );
}
uint32_t surface_format_count;
vkGetPhysicalDeviceSurfaceFormatsKHR( physical_device_in_use, surface, &surface_format_count, nullptr );
VkSurfaceFormatKHR* surface_formats =
reinterpret_cast<VkSurfaceFormatKHR*>( mem->Allocate( sizeof( VkSurfaceFormatKHR ) * surface_format_count ) );
vkGetPhysicalDeviceSurfaceFormatsKHR( physical_device_in_use, surface, &surface_format_count, surface_formats );
VkSurfaceFormatKHR format = {
.format = VK_FORMAT_UNDEFINED,
.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
};
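// Preference order: an SRGB color space with R8G8B8A8_SRGB or B8G8R8A8_SRGB wins outright, while
// R8G8B8A8_UNORM is remembered as a fallback and the search keeps looking for an SRGB format.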
for ( uint32_t i = 0; i < surface_format_count; ++i )
{
VkSurfaceFormatKHR surface_format = surface_formats[i];
if ( surface_format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR )
{
SDL_LogInfo( SDL_LOG_CATEGORY_GPU, "Color Space SRGB Found %d", surface_format.format );
if ( surface_format.format == VK_FORMAT_R8G8B8A8_SRGB )
{
format = surface_format;
break;
}
if ( surface_format.format == VK_FORMAT_B8G8R8A8_SRGB )
{
format = surface_format;
break;
}
if ( surface_format.format == VK_FORMAT_R8G8B8A8_UNORM )
{
format = surface_format;
}
}
}
ASSERT( format.format != VK_FORMAT_UNDEFINED );
swapchain_format = format.format;
uint32_t present_mode_count;
vkGetPhysicalDeviceSurfacePresentModesKHR( physical_device_in_use, surface, &present_mode_count, nullptr );
VkPresentModeKHR* present_modes =
reinterpret_cast<VkPresentModeKHR*>( mem->Allocate( sizeof( VkPresentModeKHR ) * present_mode_count ) );
vkGetPhysicalDeviceSurfacePresentModesKHR( physical_device_in_use, surface, &present_mode_count, present_modes );
VkPresentModeKHR present_mode = VK_PRESENT_MODE_FIFO_KHR;
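// FIFO is the only spec-guaranteed mode; FIFO_RELAXED is taken immediately if available, with
// MAILBOX remembered as a fallback candidate.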
for ( uint32_t i = 0; i < present_mode_count; ++i )
{
VkPresentModeKHR present_mode_iter = present_modes[i];
if ( present_mode_iter == VK_PRESENT_MODE_FIFO_RELAXED_KHR )
{
present_mode = present_mode_iter;
break;
}
if ( present_mode_iter == VK_PRESENT_MODE_MAILBOX_KHR )
{
present_mode = present_mode_iter;
}
}
mem->RestoreState( temp_alloc_start );
VkSwapchainCreateInfoKHR const swapchain_create_info = {
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
.pNext = nullptr,
.flags = 0,
.surface = surface,
.minImageCount = swapchain_image_count,
.imageFormat = format.format,
.imageColorSpace = format.colorSpace,
.imageExtent = swapchain_extent,
.imageArrayLayers = 1,
.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
.preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
.presentMode = present_mode,
.clipped = false,
.oldSwapchain = nullptr,
};
VK_CHECK( vkCreateSwapchainKHR( device, &swapchain_create_info, nullptr, &swapchain ) );
swapchain_image_count = 0;
vkGetSwapchainImagesKHR( device, swapchain, &swapchain_image_count, nullptr );
swapchain_images = reinterpret_cast<VkImage*>( mem->Allocate( sizeof( VkImage ) * swapchain_image_count ) );
vkGetSwapchainImagesKHR( device, swapchain, &swapchain_image_count, swapchain_images );
swapchain_views = reinterpret_cast<VkImageView*>( mem->Allocate( sizeof( VkImageView ) * swapchain_image_count ) );
for ( uint32_t i = 0; i != swapchain_image_count; ++i )
{
VkComponentMapping constexpr component_mapping = {
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
};
VkImageSubresourceRange constexpr subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkImageViewCreateInfo const view_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = swapchain_images[i],
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = format.format,
.components = component_mapping,
.subresourceRange = subresource_range,
};
VK_CHECK( vkCreateImageView( device, &view_create_info, nullptr, &swapchain_views[i] ) );
}
}
// Init frames.
Frame* frames = reinterpret_cast<Frame*>( mem->Allocate( sizeof( Frame ) * swapchain_image_count ) );
for ( uint32_t i = 0; i != swapchain_image_count; ++i )
{
frames[i] = Frame::Create( device, allocator, direct_queue_family_index.value(), swapchain_extent );
}
std::byte* allocation = mem->Allocate( sizeof( RenderDevice ), alignof( RenderDevice ) );
if ( not allocation ) return nullptr;
RenderDevice* render_device = new ( allocation ) RenderDevice{
instance,
surface,
physical_device_in_use,
device,
allocator,
direct_queue,
direct_queue_family_index.value(),
swapchain_format,
swapchain_extent,
swapchain,
swapchain_images,
swapchain_views,
frames,
swapchain_image_count,
};
TextureManager* texture_manager = TextureManager::Create( mem, render_device, 10000 );
if ( !texture_manager )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "TextureManager failed to init" );
render_device->Destroy();
return nullptr;
}
render_device->textureManager = texture_manager;
ASSERT( render_device->textureManager );
BufferManager* buffer_manager = BufferManager::Create( mem, render_device, 10000 );
if ( !buffer_manager )
{
SDL_LogError( SDL_LOG_CATEGORY_APPLICATION, "BufferManager failed to init" );
render_device->Destroy();
return nullptr;
}
render_device->bufferManager = buffer_manager;
ASSERT( render_device->bufferManager );
return render_device;
}
inline bool RenderDevice::IsInit() const
{
return instance and device and textureManager;
}
void RenderDevice::Destroy()
{
if ( not IsInit() ) return;
Take( bufferManager )->Destroy();
Take( textureManager )->Destroy();
for ( size_t i = 0; i < swapchainImageCount; ++i )
{
frames[i].Destroy( *this );
}
for ( auto const& view : std::span{ Take( swapchainViews ), swapchainImageCount } )
{
vkDestroyImageView( device, view, nullptr );
}
vkDestroySwapchainKHR( device, Take( swapchain ), nullptr );
vmaDestroyAllocator( Take( gpuAllocator ) );
vkDestroyDevice( Take( device ), nullptr );
SDL_Vulkan_DestroySurface( instance, Take( surface ), nullptr );
vkDestroyInstance( Take( instance ), nullptr );
volkFinalize();
}
void RenderDevice::WaitIdle() const
{
VK_CHECK( vkDeviceWaitIdle( device ) );
}
uint32_t RenderDevice::GetNumFrames() const
{
return swapchainImageCount;
}
RenderDevice::RenderDevice(
VkInstance const instance,
VkSurfaceKHR const surface,
VkPhysicalDevice const physical_device_in_use,
VkDevice const device,
VmaAllocator const allocator,
VkQueue const direct_queue,
uint32_t const direct_queue_family_index,
VkFormat const swapchain_format,
VkExtent2D const swapchain_extent,
VkSwapchainKHR const swapchain,
VkImage* swapchain_images,
VkImageView* swapchain_views,
Frame* frames,
uint32_t const swapchain_image_count )
: instance{ instance }
, surface{ surface }
, physicalDeviceInUse{ physical_device_in_use }
, device{ device }
, gpuAllocator{ allocator }
, directQueue{ direct_queue }
, directQueueFamilyIndex{ direct_queue_family_index }
, swapchainFormat{ swapchain_format }
, swapchainExtent{ swapchain_extent }
, swapchain{ swapchain }
, swapchainImages{ swapchain_images }
, swapchainViews{ swapchain_views }
, frames{ frames }
, swapchainImageCount{ swapchain_image_count }
, textureManager{ nullptr }
, bufferManager{ nullptr }
{}

View File

@ -5,16 +5,16 @@
#include "VulkanHeader.h"
struct BufferManager;
namespace Blaze
{
struct GlobalMemory;
struct Frame;
struct BufferManager;
struct TextureManager;
/// The Rendering backend abstraction
/// If this fails to initialize, we crash
///
/// TODO: Fail elegantly.
struct RenderDevice
{
struct CreateInfo
@ -45,36 +45,34 @@ struct RenderDevice
TextureManager* textureManager;
BufferManager* bufferManager;
[[nodiscard]] bool isInit() const;
void destroy();
void waitIdle() const;
[[nodiscard]] uint32_t getNumFrames() const;
[[nodiscard]] bool IsInit() const;
void WaitIdle() const;
[[nodiscard]] uint32_t GetNumFrames() const;
static RenderDevice* Create( GlobalMemory* mem, RenderDevice::CreateInfo const& create_info );
void Destroy();
RenderDevice(
VkInstance instance,
VkSurfaceKHR surface,
VkPhysicalDevice physicalDeviceInUse,
VkDevice device,
VmaAllocator gpuAllocator,
VkQueue directQueue,
uint32_t directQueueFamilyIndex,
VkInstance instance,
VkSurfaceKHR surface,
VkPhysicalDevice physical_device_in_use,
VkDevice device,
VmaAllocator allocator,
VkQueue direct_queue,
uint32_t direct_queue_family_index,
VkFormat swapchainFormat,
VkExtent2D swapchainExtent,
VkSwapchainKHR swapchain,
VkFormat swapchain_format,
VkExtent2D swapchain_extent,
VkSwapchainKHR swapchain,
VkImage* swapchainImages,
VkImageView* swapchainViews,
Frame* frames,
uint32_t swapchainImageCount );
RenderDevice( RenderDevice const& ) = delete;
RenderDevice& operator=( RenderDevice const& ) = delete;
RenderDevice( RenderDevice&& ) noexcept = delete;
RenderDevice& operator=( RenderDevice&& ) noexcept = delete;
VkImage* swapchain_images,
VkImageView* swapchain_views,
Frame* frames,
uint32_t swapchain_image_count );
#if defined( DTOR_TEST )
~RenderDevice();
#endif
};
RenderDevice* RenderDevice_Create( GlobalMemory* mem, RenderDevice::CreateInfo const& createInfo );
} // namespace Blaze

View File

@ -0,0 +1,338 @@
#include "TextureManager.h"
#include "FreeList.h"
#include "GlobalMemory.h"
#include "RenderDevice.h"
using Blaze::TextureManager;
Blaze::TextureID TextureManager::CreateTexture(
VkExtent3D const extent, VkSampler const sampler, VkFormat const format )
{
ASSERT( not m_freeList.Empty() );
Texture* texture_slot = reinterpret_cast<Texture*>( m_freeList.PopFront() );
++m_count;
VkImage texture;
VmaAllocation texture_allocation;
VkImageView texture_view;
uint32_t const mip_levels = CalculateRequiredMipLevels( extent.width, extent.height, extent.depth );
VkImageCreateInfo const image_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.imageType = VK_IMAGE_TYPE_2D,
.format = format,
.extent = extent,
.mipLevels = mip_levels,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
};
VmaAllocationCreateInfo constexpr allocation_create_info = {
.flags = 0,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = 0,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VK_CHECK( vmaCreateImage(
m_renderDevice->gpuAllocator,
&image_create_info,
&allocation_create_info,
&texture,
&texture_allocation,
nullptr ) );
VkImageSubresourceRange const subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mip_levels,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkComponentMapping constexpr component_mapping = {
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
};
VkImageViewCreateInfo const image_view_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = texture,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = image_create_info.format,
.components = component_mapping,
.subresourceRange = subresource_range,
};
VK_CHECK( vkCreateImageView( m_renderDevice->device, &image_view_create_info, nullptr, &texture_view ) );
// NOTE: texture_slot preserves its packed index between uses.
uint32_t index = texture_slot->index;
new ( texture_slot ) Texture{
.image = texture,
.allocation = texture_allocation,
.view = texture_view,
.extent = extent,
.format = format,
.index = index,
};
uint32_t const inner_index = index & kIndexMask;
// TODO: Batch all writes.
VkDescriptorImageInfo const descriptor_image_info = {
.sampler = sampler,
.imageView = texture_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkWriteDescriptorSet const descriptor_write = {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = m_descriptorSet,
.dstBinding = 0,
.dstArrayElement = inner_index,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &descriptor_image_info,
.pBufferInfo = nullptr,
.pTexelBufferView = nullptr,
};
vkUpdateDescriptorSets( m_renderDevice->device, 1, &descriptor_write, 0, nullptr );
// NOTE: Type-pun the packed index into a TextureID; this relies on RID<T> holding exactly one uint32_t.
return *reinterpret_cast<TextureID*>( &index );
}
bool TextureManager::IsValidID( TextureID const& rid ) const
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const inner_index = index & kIndexMask;
// Valid slot indices are 0 .. m_capacity - 1.
if ( inner_index >= m_capacity ) return false;
return m_textures[inner_index].index == index;
}
void TextureManager::FreeTexture( TextureID* rid )
{
if ( not IsValidID( *rid ) ) return;
Texture& texture = FetchTextureUnchecked( *rid );
DestroyTexture( texture );
*rid = {};
}
std::optional<VkImage> TextureManager::FetchImage( TextureID const& rid )
{
if ( not IsValidID( rid ) ) return std::nullopt;
return FetchTextureUnchecked( rid ).image;
}
std::optional<VkImageView> TextureManager::FetchImageView( TextureID const& rid )
{
if ( not IsValidID( rid ) ) return std::nullopt;
return FetchTextureUnchecked( rid ).view;
}
VkDescriptorSetLayout const& TextureManager::DescriptorLayout() const
{
return m_descriptorSetLayout;
}
VkDescriptorSet const& TextureManager::DescriptorSet() const
{
return m_descriptorSet;
}
void TextureManager::Destroy()
{
#if defined( _DEBUG )
if ( m_count > 0 )
{
SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%u textures still allocated.", m_count );
}
#endif
ASSERT( m_renderDevice );
while ( not m_freeList.Empty() )
{
Texture* tex = reinterpret_cast<Texture*>( m_freeList.PopFront() );
memset( tex, 0, sizeof *tex );
}
// NOTE: This only walks the first m_count slots; a live texture at a higher slot would be missed
// (pre-existing behaviour, already flagged above in _DEBUG when m_count > 0).
for ( Texture& tex : std::span{ m_textures, m_count } )
{
DestroyTexture( tex );
}
m_descriptorSet = nullptr;
vkDestroyDescriptorPool( m_renderDevice->device, Take( m_descriptorPool ), nullptr );
vkDestroyDescriptorSetLayout( m_renderDevice->device, Take( m_descriptorSetLayout ), nullptr );
}
TextureManager::~TextureManager()
{
ASSERT( not m_textures );
}
void TextureManager::DestroyTexture( Texture& tex )
{
if ( not tex.image ) return;
ASSERT( m_renderDevice );
uint32_t const index = tex.index;
uint32_t const inner_index = index & kIndexMask;
uint32_t const generation = ( index & kGenerationMask ) >> kGenerationOffset;
vkDestroyImageView( m_renderDevice->device, Take( tex.view ), nullptr );
vmaDestroyImage( m_renderDevice->gpuAllocator, Take( tex.image ), Take( tex.allocation ) );
tex.extent = {};
tex.format = VK_FORMAT_UNDEFINED;
tex.index = inner_index | ( generation + 1 ) << kGenerationOffset;
// NOTE: DO NOT EDIT INNER INDEX.
ASSERT( inner_index == ( tex.index & kIndexMask ) and "Index should not be modified" );
ASSERT( tex.index > index and "Generation should increase." );
m_freeList.PushBack( reinterpret_cast<Util::FreeList::Node*>( &tex ) );
--m_count;
}
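// CalculateRequiredMipLevels: a full chain down to 1x1.
// Example: a 1024x512x1 image yields 1 + floor(log2(1024)) = 11 levels; a 1x1x1 image yields 1.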
uint32_t TextureManager::CalculateRequiredMipLevels( uint32_t const w, uint32_t const h, uint32_t const d )
{
uint32_t const max_dim = std::max( std::max( w, h ), d );
return 1 + static_cast<uint32_t>( floorf( log2f( static_cast<float>( max_dim ) ) ) );
}
Blaze::Texture& TextureManager::FetchTextureUnchecked( TextureID const& rid )
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const inner_index = index & kIndexMask;
return m_textures[inner_index];
}
TextureManager::TextureManager(
Blaze::RenderDevice* render_device,
Texture* textures,
uint32_t const capacity,
VkDescriptorSetLayout const set_layout,
VkDescriptorPool const pool,
VkDescriptorSet const descriptor_set )
: m_renderDevice{ render_device }
, m_textures{ textures }
, m_count{ 0 }
, m_capacity{ capacity }
, m_descriptorSetLayout{ set_layout }
, m_descriptorPool{ pool }
, m_descriptorSet{ descriptor_set }
{
uint32_t i = 0;
for ( Texture& tex : std::span{ m_textures, m_capacity } )
{
// Default Generation is 1
tex.index = i++ | ( 1 << kGenerationOffset );
m_freeList.PushFront( reinterpret_cast<Util::FreeList::Node*>( &tex ) );
}
}
TextureManager* TextureManager::Create( GlobalMemory* mem, RenderDevice* render_device, uint32_t const max_count )
{
Texture* textures = reinterpret_cast<Texture*>( mem->Allocate( max_count * sizeof( Texture ), alignof( Texture ) ) );
if ( not textures ) return nullptr;
VkDescriptorSetLayoutBinding const descriptor_set_layout_binding{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = max_count,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.pImmutableSamplers = nullptr,
};
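// Bindless setup: PARTIALLY_BOUND allows unwritten slots as long as shaders never read them;
// UPDATE_AFTER_BIND and UPDATE_UNUSED_WHILE_PENDING allow descriptor writes while command buffers
// using the set are recorded or pending. The layout and pool below carry the matching
// UPDATE_AFTER_BIND flags.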
VkDescriptorBindingFlags flags = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT |
VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT |
VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT;
VkDescriptorSetLayoutBindingFlagsCreateInfo const bindless_binding = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
.pNext = nullptr,
.bindingCount = 1,
.pBindingFlags = &flags,
};
VkDescriptorSetLayoutCreateInfo const descriptor_set_layout_create_info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = &bindless_binding,
.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT,
.bindingCount = 1,
.pBindings = &descriptor_set_layout_binding,
};
VkDescriptorSetLayout descriptor_set_layout;
VK_CHECK( vkCreateDescriptorSetLayout(
render_device->device, &descriptor_set_layout_create_info, nullptr, &descriptor_set_layout ) );
VkDescriptorPoolSize const pool_size = {
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = max_count,
};
VkDescriptorPoolCreateInfo const descriptor_pool_create_info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT,
.maxSets = 1,
.poolSizeCount = 1,
.pPoolSizes = &pool_size,
};
VkDescriptorPool descriptor_pool;
VK_CHECK( vkCreateDescriptorPool( render_device->device, &descriptor_pool_create_info, nullptr, &descriptor_pool ) );
VkDescriptorSetAllocateInfo const descriptor_set_allocate_info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
.descriptorPool = descriptor_pool,
.descriptorSetCount = 1,
.pSetLayouts = &descriptor_set_layout,
};
VkDescriptorSet descriptor_set;
VK_CHECK( vkAllocateDescriptorSets( render_device->device, &descriptor_set_allocate_info, &descriptor_set ) );
std::byte* allocation = mem->Allocate( sizeof( TextureManager ), alignof( TextureManager ) );
if ( not allocation ) return nullptr;
return new ( allocation )
TextureManager{ render_device, textures, max_count, descriptor_set_layout, descriptor_pool, descriptor_set };
}

View File

@ -0,0 +1,103 @@
#pragma once
#include <optional>
#include <span>
#include "FreeList.h"
#include "MacroUtils.h"
#include "RID.h"
#include "RenderDevice.h"
#include "VulkanHeader.h"
namespace Blaze
{
struct GlobalMemory;
struct RenderDevice;
struct Texture
{
VkImage image;
VmaAllocation allocation;
VkImageView view;
VkExtent3D extent;
VkFormat format;
uint32_t index;
};
static_assert( sizeof( Texture ) > sizeof( Util::FreeList::Node ) and "Texture is used intrusively by FreeList" );
static_assert(
offsetof( Texture, index ) >= sizeof( Util::FreeList::Node ) and
"Index should not be overwritten even in invalid state" );
extern template struct RID<Texture>;
using TextureID = RID<Texture>;
struct TextureManager
{
private:
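// Packed RID layout: the low 19 bits are the slot index (up to 524,288 textures) and the high
// 13 bits are a generation bumped on every free, so stale TextureIDs fail IsValidID.
// Example: slot 5 at generation 2 packs to (2 << 19) | 5 == 0x00100005.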
constexpr static uint32_t kIndexMask = 0x0007FFFF;
constexpr static uint32_t kGenerationMask = ~kIndexMask;
constexpr static uint32_t kGenerationOffset = 19;
static_assert(
( ( kGenerationMask >> kGenerationOffset & 0x1 ) == 0x1 ) and
( ( kGenerationMask >> ( kGenerationOffset - 1 ) & 0x1 ) != 0x1 ) and "Checks boundary" );
RenderDevice* m_renderDevice;
// Texture Manager
Texture* m_textures;
uint32_t m_count;
uint32_t m_capacity;
Util::FreeList m_freeList;
// Bindless Descriptor Info
VkDescriptorSetLayout m_descriptorSetLayout;
VkDescriptorPool m_descriptorPool;
VkDescriptorSet m_descriptorSet;
void DestroyTexture( Texture& tex );
Texture& FetchTextureUnchecked( TextureID const& rid );
public:
static uint32_t CalculateRequiredMipLevels( uint32_t w, uint32_t h, uint32_t d );
[[nodiscard]] bool IsValidID( TextureID const& rid ) const;
// [[nodiscard]] std::optional<TextureID> createTexture( VkExtent3D extent );
void FreeTexture( TextureID* rid );
DEPRECATE_JULY_2025
[[nodiscard]] TextureID CreateTexture(
VkExtent3D extent, VkSampler sampler, VkFormat format = VK_FORMAT_R8G8B8A8_SRGB );
DEPRECATE_JULY_2025
std::optional<VkImage> FetchImage( TextureID const& rid );
DEPRECATE_JULY_2025
std::optional<VkImageView> FetchImageView( TextureID const& rid );
[[nodiscard]] VkDescriptorSetLayout const& DescriptorLayout() const;
[[nodiscard]] VkDescriptorSet const& DescriptorSet() const;
//
TextureManager(
RenderDevice* render_device,
Texture* textures,
uint32_t capacity,
VkDescriptorSetLayout set_layout,
VkDescriptorPool pool,
VkDescriptorSet descriptor_set );
static TextureManager* Create( GlobalMemory* mem, RenderDevice* render_device, uint32_t max_count );
void Destroy();
TextureManager( TextureManager const& other ) = delete;
TextureManager( TextureManager&& other ) noexcept = delete;
TextureManager& operator=( TextureManager const& other ) = delete;
TextureManager& operator=( TextureManager&& other ) noexcept = delete;
~TextureManager();
};
} // namespace Blaze

View File

@ -1,342 +0,0 @@
#include "TextureManager.h"
#include "FreeList.h"
#include "GlobalMemory.h"
#include "RenderDevice.h"
template struct RID<Texture>;
std::optional<TextureID> TextureManager::createTexture( VkExtent3D const extent, VkSampler const sampler, VkFormat const format )
{
if ( m_freeList.empty() )
{
return std::nullopt;
}
Texture* textureSlot = reinterpret_cast<Texture*>( m_freeList.popFront() );
++m_count;
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
VkImage texture;
VmaAllocation textureAllocation;
VkImageView textureView;
uint32_t const mipLevels = calculateRequiredMipLevels( extent.width, extent.height, extent.depth );
VkImageCreateInfo const imageCreateInfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.imageType = VK_IMAGE_TYPE_2D,
.format = format,
.extent = extent,
.mipLevels = mipLevels,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = nullptr,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
.flags = 0,
.usage = VMA_MEMORY_USAGE_AUTO,
.requiredFlags = 0,
.preferredFlags = 0,
.memoryTypeBits = 0,
.pool = nullptr,
.pUserData = nullptr,
.priority = 1.0f,
};
VK_CHECK( vmaCreateImage(
renderDevice.gpuAllocator, &imageCreateInfo, &allocationCreateInfo, &texture, &textureAllocation, nullptr ) );
VkImageSubresourceRange const subresourceRange = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 1,
};
VkComponentMapping constexpr componentMapping = {
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
};
VkImageViewCreateInfo const imageViewCreateInfo = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = nullptr,
.flags = 0,
.image = texture,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = imageCreateInfo.format,
.components = componentMapping,
.subresourceRange = subresourceRange,
};
VK_CHECK( vkCreateImageView( renderDevice.device, &imageViewCreateInfo, nullptr, &textureView ) );
// NOTE: textureSlot preserves index between uses.
uint32_t index = textureSlot->index;
new ( textureSlot ) Texture{
.image = texture,
.allocation = textureAllocation,
.view = textureView,
.extent = extent,
.format = format,
.index = index,
};
uint32_t const innerIndex = index & INDEX_MASK;
// TODO: Batch all writes.
VkDescriptorImageInfo const descriptorImageInfo = {
.sampler = sampler,
.imageView = textureView,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
};
VkWriteDescriptorSet const descriptorWrite = {
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.pNext = nullptr,
.dstSet = m_descriptorSet,
.dstBinding = 0,
.dstArrayElement = innerIndex,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo = &descriptorImageInfo,
.pBufferInfo = nullptr,
.pTexelBufferView = nullptr,
};
vkUpdateDescriptorSets( renderDevice.device, 1, &descriptorWrite, 0, nullptr );
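// The new view now occupies element innerIndex of the bindless combined-image-sampler array at binding 0,
// so shaders can index it directly with the RID's low index bits.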
// NOTE: Memory hackery to create the TextureID by reinterpreting the packed index bits.
return std::move( *reinterpret_cast<TextureID*>( &index ) );
}
bool TextureManager::isValidID( TextureID const& rid ) const
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const innerIndex = index & INDEX_MASK;
if ( innerIndex >= m_capacity ) return false;
return m_aTextures[innerIndex].index == index;
}
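// isValidID example: slot 3 at generation 1 packs to 3 | ( 1 << 19 ) = 0x00080003. The ID stays valid while
// m_aTextures[3].index holds exactly that value; once the slot is freed the stored generation becomes 2
// (0x00100003) and the stale ID no longer matches.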
void TextureManager::freeTexture( TextureID&& rid )
{
if ( not isValidID( rid ) ) return;
Texture& texture = fetchTextureUnchecked( rid );
destroyTexture( texture );
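// Consume the handle: moving rid into a discarded local presumably leaves the caller's TextureID in an invalidated, moved-from state.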
auto _ = std::move( rid );
}
std::optional<VkImage> TextureManager::fetchImage( TextureID const& rid )
{
if ( not isValidID( rid ) ) return std::nullopt;
return fetchTextureUnchecked( rid ).image;
}
std::optional<VkImageView> TextureManager::fetchImageView( TextureID const& rid )
{
if ( not isValidID( rid ) ) return std::nullopt;
return fetchTextureUnchecked( rid ).view;
}
VkDescriptorSetLayout const& TextureManager::descriptorLayout() const
{
return m_descriptorSetLayout;
}
VkDescriptorSet const& TextureManager::descriptorSet() const
{
return m_descriptorSet;
}
void TextureManager::destroy()
{
#if defined( _DEBUG )
if ( m_count > 0 )
{
SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%u textures still allocated.", m_count );
}
#endif
ASSERT( m_pRenderDevice );
RenderDevice const& renderDevice = *m_pRenderDevice;
while ( not m_freeList.empty() )
{
Texture* tex = reinterpret_cast<Texture*>( m_freeList.popFront() );
memset( tex, 0, sizeof *tex );
}
// Sweep every slot: freed slots were zeroed above, so destroyTexture skips them via the image check.
for ( Texture& tex : std::span{ m_aTextures, m_capacity } )
{
destroyTexture( tex );
}
m_descriptorSet = nullptr;
vkDestroyDescriptorPool( renderDevice.device, Take( m_descriptorPool ), nullptr );
vkDestroyDescriptorSetLayout( renderDevice.device, Take( m_descriptorSetLayout ), nullptr );
}
TextureManager::~TextureManager()
{
ASSERT( not m_aTextures );
}
void TextureManager::destroyTexture( Texture& tex )
{
if ( not tex.image ) return;
ASSERT( m_pRenderDevice );
uint32_t const index = tex.index;
uint32_t const innerIndex = index & INDEX_MASK;
uint32_t const generation = ( index & GENERATION_MASK ) >> GENERATION_OFFSET;
RenderDevice const& renderDevice = *m_pRenderDevice;
vkDestroyImageView( renderDevice.device, Take( tex.view ), nullptr );
vmaDestroyImage( renderDevice.gpuAllocator, Take( tex.image ), Take( tex.allocation ) );
tex.extent = {};
tex.format = VK_FORMAT_UNDEFINED;
tex.index = innerIndex | ( generation + 1 ) << GENERATION_OFFSET;
// NOTE: DO NOT EDIT INNER INDEX.
ASSERT( innerIndex == ( tex.index & INDEX_MASK ) and "Index should not be modified" );
ASSERT( tex.index > index and "Generation should increase." );
m_freeList.pushBack( reinterpret_cast<FreeList::Node*>( &tex ) );
--m_count;
}
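// destroyTexture note: bumping the generation invalidates every outstanding TextureID for this slot while
// keeping the inner index intact, so the slot can be handed out again from the free list.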
uint32_t TextureManager::calculateRequiredMipLevels( uint32_t const w, uint32_t const h, uint32_t const d )
{
uint32_t const maxDim = std::max( std::max( w, h ), d );
return 1 + static_cast<uint32_t>( floorf( log2f( static_cast<float>( maxDim ) ) ) );
}
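// calculateRequiredMipLevels example: w=800, h=600, d=1 -> maxDim 800, floor(log2(800)) = 9, so 10 levels; a 1x1x1 image yields 1.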
Texture& TextureManager::fetchTextureUnchecked( TextureID const& rid )
{
uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
uint32_t const innerIndex = index & INDEX_MASK;
return m_aTextures[innerIndex];
}
TextureManager::TextureManager(
RenderDevice* pRenderDevice,
Texture* aTextures,
uint32_t const capacity,
VkDescriptorSetLayout const setLayout,
VkDescriptorPool const pool,
VkDescriptorSet const descriptorSet )
: m_pRenderDevice{ pRenderDevice }
, m_aTextures{ aTextures }
, m_count{ 0 }
, m_capacity{ capacity }
, m_descriptorSetLayout{ setLayout }
, m_descriptorPool{ pool }
, m_descriptorSet{ descriptorSet }
{
uint32_t i = 0;
for ( Texture& tex : std::span{ m_aTextures, m_capacity } )
{
// Default Generation is 1
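// Starting at generation 1 (never 0) means a zero-initialized TextureID can never validate against a live slot.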
tex.index = i++ | ( 1 << GENERATION_OFFSET );
m_freeList.pushFront( reinterpret_cast<FreeList::Node*>( &tex ) );
}
}
TextureManager* TextureManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t const maxCount )
{
Texture* textures = reinterpret_cast<Texture*>( mem->allocate( maxCount * sizeof( Texture ), alignof( Texture ) ) );
if ( not textures ) return nullptr;
VkDescriptorSetLayoutBinding const descriptorSetLayoutBinding{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = maxCount,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.pImmutableSamplers = nullptr,
};
VkDescriptorBindingFlags flags = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT |
VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT |
VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT;
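// PARTIALLY_BOUND lets unwritten array elements exist as long as shaders never touch them; UPDATE_AFTER_BIND and
// UPDATE_UNUSED_WHILE_PENDING allow descriptor writes after the set is bound and while command buffers referencing
// it are pending (paired with the UPDATE_AFTER_BIND layout and pool flags below).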
VkDescriptorSetLayoutBindingFlagsCreateInfo const bindlessBinding = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
.pNext = nullptr,
.bindingCount = 1,
.pBindingFlags = &flags,
};
VkDescriptorSetLayoutCreateInfo const descriptorSetLayoutCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = &bindlessBinding,
.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT,
.bindingCount = 1,
.pBindings = &descriptorSetLayoutBinding,
};
VkDescriptorSetLayout descriptorSetLayout;
VK_CHECK( vkCreateDescriptorSetLayout(
renderDevice->device, &descriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout ) );
VkDescriptorPoolSize const poolSize = {
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = maxCount,
};
VkDescriptorPoolCreateInfo const descriptorPoolCreateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = nullptr,
.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT,
.maxSets = 1,
.poolSizeCount = 1,
.pPoolSizes = &poolSize,
};
VkDescriptorPool descriptorPool;
VK_CHECK( vkCreateDescriptorPool( renderDevice->device, &descriptorPoolCreateInfo, nullptr, &descriptorPool ) );
VkDescriptorSetAllocateInfo const descriptorSetAllocateInfo = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = nullptr,
.descriptorPool = descriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &descriptorSetLayout,
};
VkDescriptorSet descriptorSet;
VK_CHECK( vkAllocateDescriptorSets( renderDevice->device, &descriptorSetAllocateInfo, &descriptorSet ) );
std::byte* allocation = mem->allocate( sizeof( TextureManager ), alignof( TextureManager ) );
if ( not allocation ) return nullptr;
return new ( allocation )
TextureManager{ renderDevice, textures, maxCount, descriptorSetLayout, descriptorPool, descriptorSet };
}

View File

@ -1,99 +0,0 @@
#pragma once
#include <optional>
#include <span>
#include "FreeList.h"
#include "MacroUtils.h"
#include "RID.h"
#include "RenderDevice.h"
#include "VulkanHeader.h"
struct GlobalMemory;
struct RenderDevice;
struct Texture
{
VkImage image;
VmaAllocation allocation;
VkImageView view;
VkExtent3D extent;
VkFormat format;
uint32_t index;
};
static_assert( sizeof( Texture ) > sizeof( FreeList::Node ) and "Texture is used intrusively by FreeList" );
static_assert(
offsetof( Texture, index ) >= sizeof( FreeList::Node ) and
"Index should not be overwritten even in invalid state" );
extern template struct RID<Texture>;
using TextureID = RID<Texture>;
struct TextureManager
{
private:
constexpr static uint32_t INDEX_MASK = 0x0007FFFF;
constexpr static uint32_t GENERATION_MASK = ~INDEX_MASK;
constexpr static uint32_t GENERATION_OFFSET = 19;
static_assert(
( ( GENERATION_MASK >> GENERATION_OFFSET & 0x1 ) == 0x1 ) and
( ( GENERATION_MASK >> ( GENERATION_OFFSET - 1 ) & 0x1 ) != 0x1 ) and "Checks boundary" );
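// Packing example: the low 19 bits address up to 2^19 = 524,288 slots and the high 13 bits carry the generation
// (wrapping after 8,191). Slot 42 at generation 3 packs to 42 | ( 3 << 19 ) = 0x0018002A.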
RenderDevice* m_pRenderDevice;
// Texture Manager
Texture* m_aTextures;
uint32_t m_count;
uint32_t m_capacity;
FreeList m_freeList;
// Bindless Descriptor Info
VkDescriptorSetLayout m_descriptorSetLayout;
VkDescriptorPool m_descriptorPool;
VkDescriptorSet m_descriptorSet;
void destroyTexture( Texture& tex );
Texture& fetchTextureUnchecked( TextureID const& rid );
public:
static uint32_t calculateRequiredMipLevels( uint32_t w, uint32_t h, uint32_t d );
[[nodiscard]] bool isValidID( TextureID const& rid ) const;
// [[nodiscard]] std::optional<TextureID> createTexture( VkExtent3D extent );
void freeTexture( TextureID&& rid );
DEPRECATE_JULY_2025
[[nodiscard]] std::optional<TextureID> createTexture(
VkExtent3D extent, VkSampler sampler, VkFormat format = VK_FORMAT_R8G8B8A8_SRGB );
DEPRECATE_JULY_2025
std::optional<VkImage> fetchImage( TextureID const& rid );
DEPRECATE_JULY_2025
std::optional<VkImageView> fetchImageView( TextureID const& rid );
[[nodiscard]] VkDescriptorSetLayout const& descriptorLayout() const;
[[nodiscard]] VkDescriptorSet const& descriptorSet() const;
//
TextureManager(
RenderDevice* pRenderDevice,
Texture* aTextures,
uint32_t capacity,
VkDescriptorSetLayout setLayout,
VkDescriptorPool pool,
VkDescriptorSet descriptorSet );
void destroy();
TextureManager( TextureManager const& other ) = delete;
TextureManager( TextureManager&& other ) noexcept = delete;
TextureManager& operator=( TextureManager const& other ) = delete;
TextureManager& operator=( TextureManager&& other ) noexcept = delete;
~TextureManager();
};
TextureManager* TextureManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t maxCount );
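// Usage sketch (hypothetical caller; globalMemory, renderDevice, and linearSampler are assumed to already exist):
//   TextureManager* textureManager = TextureManager_Create( &globalMemory, &renderDevice, 1024 );
//   if ( textureManager )
//   {
//     std::optional<TextureID> id = textureManager->createTexture( { 256, 256, 1 }, linearSampler );
//     if ( id and textureManager->isValidID( *id ) )
//     {
//       std::optional<VkImageView> view = textureManager->fetchImageView( *id );
//       // ... record draws that index the bindless set from textureManager->descriptorSet() ...
//       textureManager->freeTexture( std::move( *id ) );
//     }
//     textureManager->destroy();
//   }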

PLAN.md (12 changed lines)
View File

@ -21,12 +21,12 @@
- [X] Create Vertex buffer
- [X] Load texture
- [X] Draw
- [ ] Render Sponza
- [X] Load GLTF Scene
- [ ] Support Albedo
- [ ] Support Normal
- [ ] Support Metal/Rough
- [ ] Support Emission
- [ ] PBR
- [X] Point Lights
- [ ] Directional Lights
- [ ] Spot Lights
- [ ] IBL
- [ ] Probe-IBL
## Features
- [ ] Scene Rendering