Compare commits

...

42 Commits

Author SHA1 Message Date
Anish Bhobe 38b697f202 Move all buffers to DeviceAddress. 2025-06-01 19:24:31 +02:00
Anish Bhobe 19e3222460 Rename systems::Device to RenderingDevice to avoid ambiguity. 2025-05-31 21:42:14 +02:00
Anish Bhobe 4cdb39c6ba Change name for pipeline creation. 2025-05-31 11:51:21 +02:00
Anish Bhobe cc1fd12b64 fix: Async Compute 2025-05-28 20:58:00 +02:00
Anish Bhobe cfb76c7d78 East Const supremacy. 2025-05-27 18:52:04 +02:00
Anish Bhobe 58edfef94d General Cleanup. 2025-05-27 18:30:50 +02:00
Anish Bhobe 3ca3beb1e4 fix: Improper GGX usage. 2025-05-25 21:29:08 +02:00
Anish Bhobe 4f71df797c Model Render updated. 2025-05-25 19:23:19 +02:00
Anish Bhobe befa36c7f1 ContextPool support for unordered contexts. 2025-05-18 00:06:06 +02:00
Anish Bhobe 3b4ea52611 ContextPools for Frames. 2025-05-17 15:25:33 +02:00
Anish Bhobe 8e2c77bcf1 Clean-up `flake.nix`. 2025-05-13 15:58:42 +02:00
Anish Bhobe 1f8f102ee1 Fixes added for clang.
- Enum values now assigned with C-enums instead of type-safe enums.
- Atomic included at `constants.h` so it's available everywhere.
- Fixed CommitManager forward declaration.
- Added `scalarLayout` option to slang compiler.
2025-05-13 13:00:11 +02:00
Anish Bhobe cc4cffe989 Fix: Usage of `format_to` in `fmt`.
Should not be using versions.
`v11::format_to` -> `fmt::format_to`.
2025-05-13 12:46:58 +02:00
Anish Bhobe adfa86ebe9 fix: obnoxious error of "util/logger". 2025-05-10 18:01:22 +02:00
Anish Bhobe 41c91058b6 Shader reflection added. 2025-05-10 18:00:25 +02:00
Anish Bhobe 3a7a2b4ab7 [WIP] Box moved to 'new API' pending fixes. 2025-05-08 17:44:55 +02:00
Anish Bhobe 63282c3587 [WIP] Added a transfer context for uploads. 2025-05-08 00:34:59 +02:00
Anish Bhobe 7351415ebf Consolidate Present as a special submit. 2025-05-07 18:27:13 +02:00
Anish Bhobe 1db942f1a9 Remove Cast and Recast. 2025-05-07 17:44:01 +02:00
Anish Bhobe 3dc6501246 fix: Error on window resize. 2025-05-06 19:06:04 +02:00
Anish Bhobe 5d6ddbb158 Added slang for Shader code compilation.
TODO: Use slang to create descriptors.
2025-05-06 15:32:58 +02:00
Anish Bhobe 7507394af9 Added Pipeline creation into the Device. 2025-05-03 13:46:44 +02:00
Anish Bhobe 2facb3e6c1 fix: Context memory leak. 2025-05-02 20:32:15 +02:00
Anish Bhobe d683de3181 Draw Triangle and bug-fixes. 2025-05-01 20:05:31 +02:00
Anish Bhobe d82e81d104 Begin Consolidation all objects under the systems::Device interface.
Currently clears a screen.
- Merge all resource creation API under Device.
- Begin a basic Context setup.
2025-05-01 13:27:19 +02:00
Anish Bhobe a790c26f1c Rename Context to Instance. 2025-04-28 21:37:03 +02:00
Anish Bhobe 668189acb5 Fixed bug in Model Loading.
Model Loader was loading indexes into image instead of going via texture.
TODO: Textures also have samplers.
2025-04-10 23:50:57 +02:00
Anish Bhobe b8b620a723 Triangle is ready. 2025-04-09 20:33:38 +02:00
Anish Bhobe 703624eb86 Reworked buffer types. 2025-04-08 23:33:07 +02:00
Anish Bhobe 1748a48272 Image, View and Sampler are all updated. 2025-04-07 00:21:50 +02:00
Anish Bhobe d8770c1e06 [WIP] Updated Buffers.
TODO: Update Image and Views.
2025-04-06 21:02:58 +02:00
Anish Bhobe 1bee73e46f [WIP] Move to shared_ptr. 2025-04-06 19:31:12 +02:00
Anish Bhobe 98660a11fa Rename: ImageViewManager -> ViewManager 2025-04-02 22:48:24 +02:00
Anish Bhobe 8eb5a678fc Cleanup and header re-date. 2025-04-02 22:46:30 +02:00
Anish Bhobe e5b002c8cc Rename freelist and clean up code. 2025-04-02 21:56:49 +02:00
Anish Bhobe ec6aeb6f3b Fixed Commit count issue. 2025-04-02 21:55:05 +02:00
Anish Bhobe 8f9b6d66be At par with old-arch.
FIXED: Bug with black speckles in prefilter.
Caused by MipMapping enabled.
2025-04-02 21:08:14 +02:00
Anish Bhobe aa729610cf [WIP] Fixed texture load corruption issue. 2025-04-02 00:27:57 +02:00
Anish Bhobe 3ab9d838fa [WIP] Separated ImageViews. 2025-04-01 08:54:30 +02:00
Anish Bhobe 73c96dc56b [WIP] Moving ModelRender to new arch.
TODO: ImageView
2025-03-31 21:32:11 +02:00
Anish Bhobe afec1e3e32 Reimplemented RenderResourceManager. 2025-03-24 22:31:47 +01:00
Anish Bhobe 396810d203 RenderResourceManager handles images and bindless. 2025-03-02 19:19:43 +01:00
204 changed files with 17129 additions and 6786 deletions

.gitignore vendored
View File

@@ -5,3 +5,4 @@ build/
.direnv/
.ccls-cache/
*.user
/vcpkg_installed

View File

@@ -4,12 +4,12 @@ cmake_minimum_required(VERSION 3.13)
project(Aster VERSION 0.1.0)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD 23)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
if (MSVC)
set(CMAKE_CXX_FLAGS "/W4 /GR- ${MSVC_FLAGS}")
set(CMAKE_CXX_FLAGS "/W4 /GR- ${MSVC_FLAGS} /utf-8")
set(CMAKE_CXX_FLAGS_RELEASE "/O3")
add_compile_definitions(_HAS_EXCEPTIONS=0)
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)

View File

@@ -1,30 +1,27 @@
function(add_shader TARGET SHADER)
find_package(Vulkan REQUIRED COMPONENTS dxc)
get_filename_component(vulkan-bin-dir ${Vulkan_GLSLC_EXECUTABLE} DIRECTORY)
find_program(slangc_exe NAMES "slangc")
if (NOT slangc_exe STREQUAL "slangc_exe-NOTFOUND")
set(slangc_exe_FOUND true)
endif()
get_filename_component(shader-ext ${SHADER} LAST_EXT)
get_filename_component(shader-inner ${SHADER} NAME_WLE)
get_filename_component(shader-type ${shader-inner} LAST_EXT)
string(REPLACE "." "" shader-type ${shader-type})
set(current-shader-path ${CMAKE_CURRENT_SOURCE_DIR}/${SHADER})
set(current-output-path ${CMAKE_CURRENT_BINARY_DIR}/${SHADER}.spv)
set(current-output-path ${CMAKE_CURRENT_BINARY_DIR}/${SHADER}.slang-module)
set(current-copy-path ${CMAKE_CURRENT_BINARY_DIR}/${SHADER})
get_filename_component(current-output-dir ${current-output-path} DIRECTORY)
file(MAKE_DIRECTORY ${current-output-dir})
if (Vulkan_dxc_exe_FOUND AND ${shader-ext} STREQUAL ".hlsl")
message("Marked as hlsl file. ${current-output-path}")
if (${shader-ext} STREQUAL ".slang")
add_custom_command(
OUTPUT ${current-output-path}
COMMAND Vulkan::dxc_exe ${DXC_SHADER_FLAGS} -spirv -T "${shader-type}_6_0" -E main ${current-shader-path} -Fo ${current-output-path}
DEPENDS ${current-shader-path}
IMPLICIT_DEPENDS CXX ${current-shader-path}
VERBATIM)
elseif (Vulkan_glslc_FOUND AND ${shader-ext} STREQUAL ".glsl")
message("Marked as glsl file. ${current-output-path}")
add_custom_command(
OUTPUT ${current-output-path}
COMMAND Vulkan::glslc ${GLSLC_SHADER_FLAGS} -o ${current-output-path} ${current-shader-path}
OUTPUT ${current-output-path} ${current-copy-path}
COMMAND ${slangc_exe} ${current-shader-path} -o ${current-output-path}
COMMAND ${CMAKE_COMMAND} -E copy ${current-shader-path} ${current-copy-path}
DEPENDS ${current-shader-path}
IMPLICIT_DEPENDS CXX ${current-shader-path}
VERBATIM)
@@ -34,9 +31,3 @@ function(add_shader TARGET SHADER)
set_source_files_properties(${current-output-path} PROPERTIES GENERATED TRUE)
target_sources(${TARGET} PRIVATE ${current-output-path})
endfunction(add_shader)
function(add_shaders TARGET SHADERS)
foreach(shader IN ${SHADERS})
add_shader(TARGET ${shader})
endforeach()
endfunction(add_shaders)

View File

@@ -9,13 +9,15 @@ find_package(Vulkan REQUIRED)
find_package(fmt CONFIG REQUIRED)
find_package(VulkanMemoryAllocator CONFIG REQUIRED)
find_package(EASTL CONFIG REQUIRED)
find_library(slang NAMES "slang" CONFIG REQUIRED)
find_package(foonathan_memory CONFIG REQUIRED)
add_library(aster_core STATIC)
add_subdirectory("include")
add_subdirectory("src")
set_property(TARGET aster_core PROPERTY CXX_STANDARD 20)
target_compile_features(aster_core PUBLIC cxx_std_23)
target_include_directories(aster_core PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include/aster")
target_include_directories(aster_core PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
@@ -26,4 +28,6 @@ target_include_directories(aster_core PRIVATE ${SCOTTT_DEBUGBREAK_INCLUDE_DIRS})
target_link_libraries(aster_core PRIVATE fmt::fmt)
target_link_libraries(aster_core PRIVATE EASTL)
target_link_libraries(aster_core PUBLIC Vulkan::Headers GPUOpen::VulkanMemoryAllocator)
target_link_libraries(aster_core PUBLIC ${slang})
target_link_libraries(aster_core PRIVATE foonathan_memory)

View File

@@ -7,7 +7,7 @@ INTERFACE
"global.h"
"constants.h"
"config.h"
"context.h"
"instance.h"
"physical_device.h"
"device.h"
"swapchain.h"
@@ -15,7 +15,9 @@ INTERFACE
"queue_allocation.h"
"buffer.h"
"image.h"
"image_view.h"
"surface.h"
"size.h"
"type_traits.h"
"window.h")
"window.h"
"sampler.h")

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: buffer.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -9,125 +9,110 @@
struct Device;
// TODO Refactor the Buffer Hierarchy
/// A Vulkan buffer wrapper.
struct Buffer
{
enum class FlagBits : u8
{
eNone = 0x0,
eStaging = 0x1,
eUniform = 0x2,
eStorage = 0x4,
eIndex = 0x8,
eVertex = 0x10,
eIndirect = 0x20,
};
using Flags = vk::Flags<FlagBits>;
constexpr static Flags FLAGS = {};
Device const *m_Device = nullptr;
vk::Buffer m_Buffer = nullptr;
VmaAllocation m_Allocation = nullptr;
u8 *m_Mapped = nullptr; ///< If the buffer is host visible, it should be (and stay) mapped.
uptr m_DeviceAddr = 0;
usize m_Size = 0;
Flags m_Flags = {};
// If the buffer is host visible, it should be (and stay) mapped.
u8 *m_Mapped = nullptr;
/// @returns True if it is a valid vulkan buffer.
[[nodiscard]] bool
IsValid() const
{
return m_Buffer;
}
[[nodiscard]] usize GetSize() const;
[[nodiscard]] bool IsHostVisible() const;
[[nodiscard]] bool IsValid() const;
[[nodiscard]] bool IsMapped() const;
[[nodiscard]] bool IsOwned() const;
[[nodiscard]] bool IsCommitted() const;
void SetCommitted(bool committed);
/// If the buffer is host visible, it should be (and stay) mapped.
/// @returns True if the buffer is host-visible and mapped.
[[nodiscard]] bool
IsMapped() const
{
return m_Mapped;
}
void Destroy(const Device *device);
void Write(const Device *device, usize offset, usize size, const void *data);
/// Writes the data to the buffer.
/// @note The buffer must be mapped.
void Write(usize offset, usize size, void const *data) const;
void Allocate(const Device *device, usize size, vk::BufferUsageFlags bufferUsage,
VmaAllocationCreateFlags allocationFlags, VmaMemoryUsage memoryUsage, cstr name);
/// If Buffer Device Address is enabled,
/// returns the buffer's device address.
[[nodiscard]] uptr GetDeviceAddress() const;
uptr
GetDeviceAddress(const Device *device);
// Constructors
// Buffer.size is used for bookkeeping
// If the buffer is Invalid, the remaining data in Buffer is used intrusively by `GpuResourceManager`.
usize m_Size_ = 0;
Buffer(Device const *device, usize size, vk::BufferUsageFlags bufferUsage, VmaAllocationCreateFlags allocationFlags,
VmaMemoryUsage memoryUsage, cstr name);
constexpr static usize VALID_BUFFER_BIT = Cast<usize>(1llu << 63);
constexpr static usize OWNED_BIT = 1llu << 62;
constexpr static usize COMMITTED_BIT = 1llu << 61;
constexpr static usize SIZE_MASK = ~(VALID_BUFFER_BIT | OWNED_BIT | COMMITTED_BIT);
Buffer(Buffer &&other) noexcept;
Buffer &operator=(Buffer &&other) noexcept;
~Buffer();
DISALLOW_COPY_AND_ASSIGN(Buffer);
};
template <>
constexpr bool concepts::GpuResource<Buffer> = true;
// Ensure that m_Size doesn't get used intrusively since it manages the state.
static_assert(offsetof(Buffer, m_Size_) > sizeof(usize));
struct UniformBuffer : Buffer
{
void Init(const Device *device, usize size, cstr name = nullptr);
constexpr static Flags FLAGS = FlagBits::eUniform;
};
struct StorageBuffer : Buffer
{
void Init(const Device *device, usize size, bool hostVisible, cstr name = nullptr);
void Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name = nullptr);
constexpr static Flags FLAGS = FlagBits::eStorage;
};
struct IndirectBuffer : Buffer
{
void Init(const Device *device, usize size, bool hostVisible, cstr name = nullptr);
};
struct StorageIndexBuffer : StorageBuffer
{
void Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name = nullptr);
constexpr static Flags FLAGS = FlagBits::eIndirect;
};
struct VertexBuffer : Buffer
{
void Init(const Device *device, usize size, cstr name = nullptr);
void Write(const Device *device, void *data, usize size, usize offset) const = delete;
constexpr static Flags FLAGS = FlagBits::eVertex;
};
struct IndexBuffer : Buffer
{
void Init(const Device *device, usize size, cstr name = nullptr);
void Write(const Device *device, void *data, usize size, usize offset) const = delete;
constexpr static Flags FLAGS = FlagBits::eIndex;
};
struct StagingBuffer : Buffer
{
void Init(const Device *device, usize size, cstr name = nullptr);
constexpr static Flags FLAGS = FlagBits::eStaging;
};
inline usize
Buffer::GetSize() const
namespace concepts
{
return m_Size_ & SIZE_MASK;
}
template <typename T>
concept AnyBuffer = std::derived_from<T, Buffer>;
inline bool
Buffer::IsHostVisible() const
{
return IsMapped();
}
template <typename T, typename TInto>
concept BufferInto = std::derived_from<T, Buffer> and std::derived_from<TInto, Buffer> and
(static_cast<bool>(T::FLAGS & TInto::FLAGS) or std::same_as<Buffer, TInto>);
inline bool
Buffer::IsValid() const
{
return m_Size_ & VALID_BUFFER_BIT;
}
template <typename T>
concept AnyBufferRef = Deref<T> and AnyBuffer<DerefType<T>>;
inline bool
Buffer::IsMapped() const
{
return m_Mapped;
}
template <typename T, typename TTo>
concept BufferRefTo = Deref<T> and BufferInto<DerefType<T>, TTo>;
inline bool
Buffer::IsOwned() const
{
return m_Size_ & OWNED_BIT;
}
inline bool
Buffer::IsCommitted() const
{
return m_Size_ & COMMITTED_BIT;
}
inline void
Buffer::SetCommitted(const bool committed)
{
m_Size_ = committed ? (m_Size_ | COMMITTED_BIT) : (m_Size_ & ~COMMITTED_BIT);
}
} // namespace concepts
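
Because the diff above interleaves old and new lines, the bit-packing scheme in the new Buffer is easy to miss: validity, ownership, and committed state live in the top three bits of m_Size_, and GetSize() masks them off. Below is a minimal self-contained sketch of that scheme, using plain std types rather than the engine's aliases (all names here are illustrative, not the repo's).

// Bit-packed size field: low 61 bits = byte size, high bits = state.
#include <cassert>
#include <cstdint>

constexpr std::uint64_t VALID_BIT     = 1ull << 63;
constexpr std::uint64_t OWNED_BIT     = 1ull << 62;
constexpr std::uint64_t COMMITTED_BIT = 1ull << 61;
constexpr std::uint64_t SIZE_MASK     = ~(VALID_BIT | OWNED_BIT | COMMITTED_BIT);

struct PackedSize
{
    std::uint64_t m_Bits = 0;

    std::uint64_t Size() const { return m_Bits & SIZE_MASK; }
    bool IsValid() const { return m_Bits & VALID_BIT; }
    bool IsCommitted() const { return m_Bits & COMMITTED_BIT; }

    void SetCommitted(bool c)
    {
        // Same pattern as Buffer::SetCommitted above: set or clear one bit.
        m_Bits = c ? (m_Bits | COMMITTED_BIT) : (m_Bits & ~COMMITTED_BIT);
    }
};

int main()
{
    PackedSize s{VALID_BIT | OWNED_BIT | 4096};
    assert(s.Size() == 4096 && s.IsValid() && !s.IsCommitted());
    s.SetCommitted(true);
    assert(s.IsCommitted() && s.Size() == 4096); // state bits don't disturb the size
}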

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: config.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -15,6 +15,8 @@
#define VULKAN_HPP_NO_STRUCT_CONSTRUCTORS
#define VULKAN_HPP_DISABLE_ENHANCED_MODE 1
#define VULKAN_HPP_NO_EXCEPTIONS 1
#define VULKAN_HPP_NO_SMART_HANDLE 1
#define VULKAN_HPP_NO_STRUCT_SETTERS 1
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: constants.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -13,6 +13,8 @@
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <atomic>
using c8 = char;
using u8 = uint8_t;
using u16 = uint16_t;
@@ -28,8 +30,9 @@ using f128 = long double;
using b8 = bool;
using b32 = u32;
using usize = size_t;
using isize = intptr_t;
using uptr = uintptr_t;
using cstr = const char *;
using cstr = char const *;
namespace ansi_color
{
@@ -44,42 +47,28 @@ constexpr auto White = "\u001b[37m";
constexpr auto Reset = "\u001b[0m";
} // namespace ansi_color
template <typename TType, typename TFrom>
constexpr auto
Cast(TFrom &&in)
{
return static_cast<TType>(std::forward<TFrom>(in));
}
template <typename TType, typename TFrom>
constexpr auto
Recast(TFrom &&in)
{
return reinterpret_cast<TType>(std::forward<TFrom>(in));
}
constexpr f32
operator""_deg(long double degrees)
{
return glm::radians<f32>(Cast<f32>(degrees));
return glm::radians<f32>(static_cast<f32>(degrees));
}
constexpr f32
operator""_deg(unsigned long long int degrees)
{
return glm::radians<f32>(Cast<f32>(degrees));
return glm::radians<f32>(static_cast<f32>(degrees));
}
constexpr f32
operator""_rad(long double rads)
{
return Cast<f32>(rads);
return static_cast<f32>(rads);
}
constexpr f32
operator""_rad(unsigned long long int rads)
{
return Cast<f32>(rads);
return static_cast<f32>(rads);
}
using glm::ivec2;
@@ -116,31 +105,31 @@ constexpr Version VERSION = {
};
constexpr u32
Kilobyte(const u32 in)
Kilobyte(u32 const in)
{
return in * 1024;
}
constexpr usize
Kilobyte(const usize in)
Kilobyte(usize const in)
{
return in * 1024;
}
constexpr u32
Megabyte(const u32 in)
Megabyte(u32 const in)
{
return in * 1024 * 1024;
}
constexpr usize
Megabyte(const usize in)
Megabyte(usize const in)
{
return in * 1024 * 1024;
}
constexpr usize
Gigabyte(const usize in)
Gigabyte(usize const in)
{
return in * 1024 * 1024 * 1024;
}
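
The _deg/_rad literals retained above are thin wrappers over glm::radians, now spelled with static_cast after Cast was removed. A standalone sketch of the same idea, with the glm call expanded to its definition so the example compiles on its own:

// User-defined literals for angles; glm::radians<f32> reduces to deg * pi / 180.
#include <cassert>
#include <cmath>

constexpr float operator""_deg(long double degrees)
{
    return static_cast<float>(degrees) * 3.14159265358979323846f / 180.0f;
}

constexpr float operator""_rad(long double rads)
{
    return static_cast<float>(rads);
}

int main()
{
    constexpr float quarterTurn = 90.0_deg; // ~1.5708 rad
    static_assert(1.0_rad == 1.0f);
    assert(std::abs(quarterTurn - 1.5708f) < 1e-3f);
}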

View File

@@ -1,17 +1,17 @@
// =============================================
// Aster: device.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
#include <EASTL/vector.h>
#include <EASTL/span.h>
#include <EASTL/vector.h>
struct QueueAllocation;
struct Context;
struct Instance;
struct PhysicalDevice;
struct Features
@@ -31,19 +31,31 @@ struct Device final
vk::PipelineCache m_PipelineCache = nullptr;
bool m_ValidationEnabled = true;
template <typename T>
requires vk::isVulkanHandleType<T>::value void SetName(const T &object, cstr name) const;
template <concepts::VkHandle T>
void SetName(T const &object, cstr name) const;
[[nodiscard]] vk::Queue GetQueue(u32 familyIndex, u32 queueIndex) const;
[[nodiscard]] eastl::vector<u8> DumpPipelineCache() const;
void WaitIdle() const;
vk::Device *
operator->()
{
return &m_Device;
}
vk::Device const *
operator->() const
{
return &m_Device;
}
// Ctor/Dtor
Device(const Context *context, PhysicalDevice *physicalDevice, Features *enabledFeatures,
const eastl::vector<QueueAllocation> &queueAllocations, NameString &&name);
Device(const Context *context, PhysicalDevice *physicalDevice, Features *enabledFeatures,
const eastl::vector<QueueAllocation> &queueAllocations, eastl::span<u8> &&pipelineCacheData, NameString &&name);
Device() = default;
Device(Instance const &context, PhysicalDevice &physicalDevice, Features &enabledFeatures,
eastl::span<QueueAllocation> const &queueAllocations, eastl::span<u8> const &pipelineCacheData,
NameString &&name);
~Device();
// Move
@@ -53,15 +65,15 @@ struct Device final
DISALLOW_COPY_AND_ASSIGN(Device);
};
template <typename T>
requires vk::isVulkanHandleType<T>::value void
Device::SetName(const T &object, cstr name) const
template <concepts::VkHandle T>
void
Device::SetName(T const &object, cstr name) const
{
if (!m_ValidationEnabled || !name || !object)
return;
auto handle = Recast<u64>(Cast<typename T::NativeType>(object));
const vk::DebugUtilsObjectNameInfoEXT objectNameInfo = {
auto handle = reinterpret_cast<u64>(static_cast<typename T::NativeType>(object));
vk::DebugUtilsObjectNameInfoEXT const objectNameInfo = {
.objectType = object.objectType,
.objectHandle = handle,
.pObjectName = name,
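
The rewrite above replaces SetName's trailing requires-clause with the named VkHandle concept from global.h. A compilable sketch of the same constraint pattern, with a stand-in trait in place of vk::isVulkanHandleType (all names here are hypothetical):

#include <iostream>
#include <type_traits>

// Stand-in for vk::isVulkanHandleType.
template <typename T>
struct is_handle : std::false_type {};

struct FakeBuffer {};
template <>
struct is_handle<FakeBuffer> : std::true_type {};

namespace concepts
{
template <typename T>
concept VkHandle = is_handle<T>::value;
}

// The concept replaces `requires is_handle<T>::value` on the template.
template <concepts::VkHandle T>
void SetName(T const &, char const *name)
{
    std::cout << "named: " << name << '\n';
}

int main()
{
    SetName(FakeBuffer{}, "staging buffer");
    // SetName(42, "int"); // would not compile: int does not satisfy VkHandle
}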

View File

@@ -1,17 +1,20 @@
// =============================================
// Aster: global.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "config.h"
#include "constants.h"
#include "util/logger.h"
#include "aster/util/logger.h"
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <EASTL/shared_ptr.h>
#include <fmt/format.h>
// Macros that can collide with functions.
@@ -26,6 +29,8 @@
#if !defined(NDEBUG)
#define VULKAN_HPP_ASSERT(expr) DEBUG_IF(!(expr), "Vulkan assert failed")
#endif
#include "EASTL/intrusive_ptr.h"
#include "type_traits.h"
#include <EASTL/fixed_string.h>
@@ -50,38 +55,70 @@ constexpr u32 ASTER_API_VERSION = VK_API_VERSION_1_3;
#define Take(ELEMENT) eastl::exchange(ELEMENT, {})
#define TODO(MSG) assert(false && ("Unimplemented: " MSG))
#define TODO(...) assert(!("Unimplemented: " __VA_ARGS__))
#define FIX(...) static_assert(!("Unimplemented: " __VA_ARGS__))
#define UNREACHABLE(...) assert(!("Unreachable: " __VA_ARGS__))
#define AbortIfFailed(RESULT) \
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = static_cast<vk::Result>(RESULT)), "Cause: {}", _checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
#define AbortIfFailedMV(RESULT, MSG, EXTRA) \
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = static_cast<vk::Result>(RESULT)), MSG " Cause: {}", EXTRA, \
_checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
#define AbortIfFailedM(RESULT, MSG) \
do \
{ \
auto _checkResultValue_ = static_cast<vk::Result>(RESULT); \
ERROR_IF(Failed(_checkResultValue_), MSG " Cause: {}", _checkResultValue_) THEN_ABORT(_checkResultValue_); \
} while (false)
[[nodiscard]] inline bool
Failed(const vk::Result result)
Failed(vk::Result const result)
{
return result != vk::Result::eSuccess;
}
namespace concepts
{
template <typename T>
concept VkHandle = vk::isVulkanHandleType<T>::value;
}
using NameString = eastl::fixed_string<char, 32, false>;
template <typename TFlagBits>
struct eastl::hash<vk::Flags<TFlagBits>> // NOLINT(*-dcl58-cpp)
{
[[nodiscard]] usize
operator()(const vk::Flags<TFlagBits> &val)
operator()(vk::Flags<TFlagBits> const &val)
{
return std::hash<u32>()(Cast<u32>(val));
return std::hash<u32>()(static_cast<u32>(val));
}
};
template <typename T>
[[nodiscard]] usize
HashAny(const T &val)
HashAny(T const &val)
{
return eastl::hash<std::remove_cvref_t<T>>()(val);
}
[[nodiscard]] inline usize
HashCombine(const usize hash0, const usize hash1)
HashCombine(usize const hash0, usize const hash1)
{
constexpr usize saltValue = 0x9e3779b9;
const usize tempVar = hash1 + saltValue + (hash0 << 6) + (hash0 >> 2);
usize const tempVar = hash1 + saltValue + (hash0 << 6) + (hash0 >> 2);
return hash0 ^ tempVar;
}
@@ -104,32 +141,32 @@ struct Time
Update()
{
ERROR_IF(std::isnan(m_Elapsed), "Time not init.");
const auto newElapsed = glfwGetTime();
auto const newElapsed = glfwGetTime();
m_Delta = std::clamp(newElapsed - m_Elapsed, 0.0, MAX_DELTA);
m_Elapsed = newElapsed;
}
};
[[nodiscard]] constexpr usize
ClosestMultiple(const usize val, const usize of)
ClosestMultiple(usize const val, usize const of)
{
return of * ((val + of - 1) / of);
}
[[nodiscard]] constexpr u32
ClosestMultiple(const u32 val, const u32 of)
ClosestMultiple(u32 const val, u32 const of)
{
return of * ((val + of - 1) / of);
}
[[nodiscard]] constexpr bool
IsPowerOfTwo(const usize val)
IsPowerOfTwo(usize const val)
{
return val && !(val & (val - 1));
}
[[nodiscard]] constexpr bool
IsPowerOfTwo(const u32 val)
IsPowerOfTwo(u32 const val)
{
return val && !(val & (val - 1));
}
@@ -149,10 +186,10 @@ ClosestLargerPowerOfTwo(usize val)
}
[[nodiscard]] constexpr usize
ClosestPowerOfTwo(const usize val)
ClosestPowerOfTwo(usize const val)
{
const usize largerPo2 = ClosestLargerPowerOfTwo(val);
const usize smallerPo2 = largerPo2 >> 1;
usize const largerPo2 = ClosestLargerPowerOfTwo(val);
usize const smallerPo2 = largerPo2 >> 1;
return (smallerPo2 + largerPo2 <= (val << 1)) ? largerPo2 : smallerPo2;
}
@@ -170,10 +207,10 @@ ClosestLargerPowerOfTwo(u32 val)
}
[[nodiscard]] constexpr u32
ClosestPowerOfTwo(const u32 val)
ClosestPowerOfTwo(u32 const val)
{
const u32 largerPo2 = ClosestLargerPowerOfTwo(val);
const u32 smallerPo2 = largerPo2 >> 1;
u32 const largerPo2 = ClosestLargerPowerOfTwo(val);
u32 const smallerPo2 = largerPo2 >> 1;
return (smallerPo2 + largerPo2 <= (val << 1)) ? largerPo2 : smallerPo2;
}
@@ -189,15 +226,20 @@ GetMaskOffset(u32 val)
return count;
}
template <>
struct fmt::formatter<vk::Result> : nested_formatter<std::string>
template <typename T>
concept VkToString = requires(T a) {
{ vk::to_string(a) } -> std::convertible_to<std::string>;
};
template <VkToString T>
struct fmt::formatter<T> : nested_formatter<std::string>
{
auto
// ReSharper disable once CppInconsistentNaming
format(vk::Result result, format_context &ctx) const
format(T result, format_context &ctx) const
{
return write_padded(ctx,
[this, result](auto out) { return v10::format_to(out, "{}", nested(to_string(result))); });
[this, result](auto out) { return fmt::format_to(out, "{}", nested(to_string(result))); });
}
};
@@ -206,8 +248,14 @@ struct fmt::formatter<eastl::fixed_string<TType, TCount, TOverflow>> : nested_fo
{
auto
// ReSharper disable once CppInconsistentNaming
format(const eastl::fixed_string<TType, TCount, TOverflow> &str, format_context &ctx) const
format(eastl::fixed_string<TType, TCount, TOverflow> const &str, format_context &ctx) const
{
return write_padded(ctx, [this, str](auto out) { return v10::format_to(out, "{}", nested(str.c_str())); });
return write_padded(ctx, [this, str](auto out) { return fmt::format_to(out, "{}", nested(str.c_str())); });
}
};
template <typename T>
using Ref = eastl::shared_ptr<T>;
template <typename T>
using WeakRef = eastl::weak_ptr<T>;
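
HashCombine above is the classic boost-style combiner with the 0x9e3779b9 salt. A self-contained sketch of the same function using std::hash in place of eastl::hash:

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

std::size_t HashCombine(std::size_t h0, std::size_t h1)
{
    // Golden-ratio salt plus shifted mixes of h0, xor-folded back in.
    constexpr std::size_t salt = 0x9e3779b9;
    return h0 ^ (h1 + salt + (h0 << 6) + (h0 >> 2));
}

int main()
{
    std::size_t const a = std::hash<std::string>()("buffer");
    std::size_t const b = std::hash<unsigned>()(42u);
    // The combiner is order-sensitive: (a, b) and (b, a) differ.
    std::cout << HashCombine(a, b) << " != " << HashCombine(b, a) << '\n';
}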

View File

@@ -1,137 +1,132 @@
// =============================================
// Aster: image.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
struct StorageTexture;
struct Device;
[[nodiscard]] inline vk::Extent2D
ToExtent2D(const vk::Extent3D &extent)
ToExtent2D(vk::Extent3D const &extent)
{
return {extent.width, extent.height};
}
[[nodiscard]] inline vk::Extent3D
ToExtent3D(const vk::Extent2D &extent, const u32 depth)
ToExtent3D(vk::Extent2D const &extent, u32 const depth)
{
return {extent.width, extent.height, depth};
}
[[nodiscard]] inline vk::Offset2D
ToOffset2D(const vk::Extent3D &extent)
ToOffset2D(vk::Extent3D const &extent)
{
return {Cast<i32>(extent.width), Cast<i32>(extent.height)};
return {static_cast<i32>(extent.width), static_cast<i32>(extent.height)};
}
[[nodiscard]] inline vk::Offset3D
ToOffset3D(const vk::Extent3D &extent)
ToOffset3D(vk::Extent3D const &extent)
{
return {Cast<i32>(extent.width), Cast<i32>(extent.height), Cast<i32>(extent.depth)};
return {static_cast<i32>(extent.width), static_cast<i32>(extent.height), static_cast<i32>(extent.depth)};
}
struct Image
{
enum class FlagBits : u8
{
eSampled = 0x1,
eStorage = 0x2,
eCube = 0x4,
};
using Flags = vk::Flags<FlagBits>;
constexpr static Flags FLAGS = {};
Device const *m_Device = nullptr;
vk::Image m_Image = nullptr;
vk::ImageView m_View = nullptr;
VmaAllocation m_Allocation = nullptr;
vk::Extent3D m_Extent;
// Image.m_MipLevels_ is used for bookkeeping
// If the image is Invalid, the remaining data in Image is used intrusively by `GpuResourceManager`.
vk::Format m_Format;
u8 m_EmptyPadding_ = 0;
u8 m_Flags_ = 0;
Flags m_Flags_ = {};
u8 m_LayerCount = 0;
u8 m_MipLevels = 0;
[[nodiscard]] bool IsValid() const;
[[nodiscard]] bool IsOwned() const;
[[nodiscard]] u32 GetMipLevels() const;
[[nodiscard]] bool IsCommitted() const;
void SetCommitted(bool committed);
[[nodiscard]] bool
IsValid() const
{
return m_Image;
}
void Destroy(const Device *device);
[[nodiscard]] u32
GetMipLevels() const
{
return m_MipLevels;
}
constexpr static u8 VALID_BIT = 1u << 7;
constexpr static u8 OWNED_BIT = 1u << 6;
constexpr static u8 COMMITTED_BIT = 1u << 5;
void DestroyView(vk::ImageView imageView) const;
// Constructors.
explicit Image(Device const *device, vk::Image image, VmaAllocation allocation, vk::Extent3D extent,
vk::Format format, Flags flags, u8 layerCount, u8 mipLevels);
Image(Image &&other) noexcept;
Image &operator=(Image &&other) noexcept;
~Image();
DISALLOW_COPY_AND_ASSIGN(Image);
};
template <>
constexpr bool concepts::GpuResource<Image> = true;
struct Texture : Image
{
void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, bool isMipMapped, cstr name = nullptr);
constexpr static Flags FLAGS = FlagBits::eSampled;
};
static_assert(sizeof(Texture) == sizeof(Image));
struct TextureCube : Texture
struct ImageCube : Image
{
void
Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isMipMapped = false, cstr name = nullptr);
constexpr static Flags FLAGS = FlagBits::eCube;
};
static_assert(sizeof(TextureCube) == sizeof(Image));
struct AttachmentImage : Image
struct TextureCube : Image
{
void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, cstr name = nullptr);
constexpr static Flags FLAGS = Texture::FLAGS | ImageCube::FLAGS;
};
static_assert(sizeof(AttachmentImage) == sizeof(Image));
struct DepthImage : Image
struct StorageImage : Image
{
void Init(const Device *device, vk::Extent2D extent, cstr name = nullptr);
constexpr static Flags FLAGS = FlagBits::eStorage;
};
static_assert(sizeof(DepthImage) == sizeof(Image));
struct StorageTexture : Texture
struct StorageTexture : StorageImage
{
void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, bool isSampled, cstr name = nullptr);
constexpr static Flags FLAGS = StorageImage::FLAGS | Texture::FLAGS;
};
static_assert(sizeof(StorageTexture) == sizeof(Image));
struct StorageTextureCube : StorageTexture
struct StorageTextureCube : StorageImage
{
void Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isSampled, bool isMipMapped = false,
cstr name = nullptr);
constexpr static Flags FLAGS = StorageImage::FLAGS | Texture::FLAGS | ImageCube::FLAGS;
};
static_assert(sizeof(StorageTextureCube) == sizeof(Image));
inline bool
Image::IsValid() const
namespace concepts
{
return m_Flags_ & VALID_BIT;
}
inline bool
Image::IsOwned() const
{
return m_Flags_ & OWNED_BIT;
}
template <typename T>
concept AnyImage = std::derived_from<T, Image>;
inline u32
Image::GetMipLevels() const
{
return m_MipLevels;
}
template <typename T, typename TInto>
concept ImageInto = std::derived_from<T, Image> and std::derived_from<TInto, Image> and
(static_cast<bool>(T::FLAGS & TInto::FLAGS) or std::same_as<Image, TInto>);
inline bool
Image::IsCommitted() const
{
return m_Flags_ & COMMITTED_BIT;
}
template <typename T>
concept AnyImageRef = Deref<T> and AnyImage<DerefType<T>>;
inline void
Image::SetCommitted(const bool committed)
{
m_Flags_ = committed ? (m_Flags_ | COMMITTED_BIT) : (m_Flags_ & ~COMMITTED_BIT);
}
template <typename T, typename TTo>
concept ImageRefTo = Deref<T> and ImageInto<DerefType<T>, TTo>;
} // namespace concepts
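
The new concepts at the end of image.h encode convertibility between image wrappers as flag overlap: T can stand in for TInto when their capability flags intersect, or when TInto is the plain Image base. A reduced sketch with illustrative flag values (the real FlagBits live above):

#include <concepts>
#include <cstdint>

struct Image { static constexpr std::uint8_t FLAGS = 0; };
struct Texture : Image { static constexpr std::uint8_t FLAGS = 0x1; };      // sampled
struct StorageImage : Image { static constexpr std::uint8_t FLAGS = 0x2; }; // storage
struct StorageTexture : Image
{
    static constexpr std::uint8_t FLAGS = Texture::FLAGS | StorageImage::FLAGS;
};

template <typename T, typename TInto>
concept ImageInto = std::derived_from<T, Image> and std::derived_from<TInto, Image> and
                    (static_cast<bool>(T::FLAGS & TInto::FLAGS) or std::same_as<Image, TInto>);

static_assert(ImageInto<StorageTexture, Texture>);      // sampled capability overlaps
static_assert(ImageInto<StorageTexture, StorageImage>); // storage capability overlaps
static_assert(ImageInto<Texture, Image>);               // anything converts to the base
static_assert(!ImageInto<Texture, StorageImage>);       // no shared capability

int main() {}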

View File

@@ -0,0 +1,108 @@
// =============================================
// Aster: image_view.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
#include "image.h"
template <concepts::AnyImage TImage>
struct View
{
using ImageType = TImage;
Ref<Image> m_Image;
vk::ImageView m_View = nullptr;
vk::Extent3D m_Extent;
u8 m_BaseLayer = 0;
u8 m_LayerCount = 0;
u8 m_BaseMipLevel = 0;
u8 m_MipLevelCount = 0;
[[nodiscard]] vk::Image
GetImage() const
{
return m_Image->m_Image;
}
[[nodiscard]] bool
IsValid() const
{
return static_cast<bool>(m_Image);
}
View(Ref<Image> image, vk::ImageView const view, vk::Extent3D const extent, u8 const baseLayer, u8 const layerCount,
u8 const baseMipLevel, u8 const mipLevelCount)
: m_Image{std::move(image)}
, m_View{view}
, m_Extent{extent}
, m_BaseLayer{baseLayer}
, m_LayerCount{layerCount}
, m_BaseMipLevel{baseMipLevel}
, m_MipLevelCount{mipLevelCount}
{
}
View(View &&other) noexcept
: m_Image{std::move(other.m_Image)}
, m_View{Take(other.m_View)}
, m_Extent{std::move(other.m_Extent)}
, m_BaseLayer{other.m_BaseLayer}
, m_LayerCount{other.m_LayerCount}
, m_BaseMipLevel{other.m_BaseMipLevel}
, m_MipLevelCount{other.m_MipLevelCount}
{
}
View &
operator=(View &&other) noexcept
{
if (this == &other)
return *this;
using std::swap;
swap(m_Image, other.m_Image);
swap(m_View, other.m_View);
swap(m_Extent, other.m_Extent);
swap(m_BaseLayer, other.m_BaseLayer);
swap(m_LayerCount, other.m_LayerCount);
swap(m_BaseMipLevel, other.m_BaseMipLevel);
swap(m_MipLevelCount, other.m_MipLevelCount);
return *this;
}
DISALLOW_COPY_AND_ASSIGN(View);
~View()
{
if (!IsValid())
return;
m_Image->DestroyView(Take(m_View));
}
};
using ImageView = View<Image>;
using ImageCubeView = View<ImageCube>;
using TextureView = View<Texture>;
using TextureCubeView = View<TextureCube>;
using StorageImageView = View<StorageImage>;
using StorageTextureView = View<StorageTexture>;
using StorageTextureCubeView = View<StorageTextureCube>;
namespace concepts
{
template <typename T>
concept View = std::derived_from<T, View<typename T::ImageType>>;
template <typename T, typename TTo>
concept ViewTo = View<T> and ImageInto<typename T::ImageType, TTo>;
template <typename T>
concept ViewRef = Deref<T> and View<DerefType<T>>;
template <typename T, typename TTo>
concept ViewRefTo = ViewRef<T> and ImageInto<typename DerefType<T>::ImageType, TTo>;
} // namespace concepts
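
View<TImage> keeps its parent image alive through a Ref (shared pointer) and hands the vk::ImageView back to the image on destruction. A reduced sketch of that ownership pattern with stand-in types:

#include <iostream>
#include <memory>

struct Image
{
    void DestroyView(int viewHandle) const
    {
        std::cout << "destroying view " << viewHandle << '\n';
    }
};

struct View
{
    std::shared_ptr<Image> m_Image; // parent image outlives the view
    int m_View = 0;                 // stand-in for vk::ImageView

    ~View()
    {
        if (m_Image)
            m_Image->DestroyView(m_View); // view handle released via the owner
    }
};

int main()
{
    auto image = std::make_shared<Image>();
    {
        View v{image, 7};
    } // the view is destroyed first; the image is still alive here
    std::cout << "image refs: " << image.use_count() << '\n'; // 1
}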

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: context.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -8,25 +8,26 @@
#include "global.h"
/**
* @class Context
* @class Instance
*
* @brief Vulkan context to handle device initialization logic.
*
* Handles the required hardware interactions.
*/
struct Context final
struct Instance final
{
// Members
vk::Instance m_Instance = nullptr;
vk::DebugUtilsMessengerEXT m_DebugMessenger = nullptr;
// Ctor/Dtor
Context(cstr appName, Version version, bool enableValidation = ENABLE_LAYER_MESSAGES_DEFAULT_VALUE);
~Context();
Instance() = default;
Instance(cstr appName, Version version, bool enableValidation = ENABLE_LAYER_MESSAGES_DEFAULT_VALUE);
~Instance();
// Move
Context(Context &&other) noexcept;
Context &operator=(Context &&other) noexcept;
Instance(Instance &&other) noexcept;
Instance &operator=(Instance &&other) noexcept;
#if !defined(ASTER_NDEBUG)
constexpr static bool ENABLE_LAYER_MESSAGES_DEFAULT_VALUE = true;
@@ -34,5 +35,5 @@ struct Context final
constexpr static bool ENABLE_LAYER_MESSAGES_DEFAULT_VALUE = false;
#endif
DISALLOW_COPY_AND_ASSIGN(Context);
DISALLOW_COPY_AND_ASSIGN(Instance);
};

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: physical_device.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -8,10 +8,12 @@
#include "global.h"
#include "surface.h"
#include <sstream>
#include <EASTL/fixed_vector.h>
struct Window;
struct Context;
struct Instance;
enum class QueueSupportFlagBits
{
@@ -23,6 +25,31 @@ enum class QueueSupportFlagBits
using QueueSupportFlags = vk::Flags<QueueSupportFlagBits>;
inline std::string
// ReSharper disable once CppInconsistentNaming
format_as(QueueSupportFlags const &qfi)
{
std::stringstream sb;
if (qfi & QueueSupportFlagBits::eGraphics)
{
sb << "Graphics | ";
}
if (qfi & QueueSupportFlagBits::eTransfer)
{
sb << "Transfer | ";
}
if (qfi & QueueSupportFlagBits::eCompute)
{
sb << "Compute | ";
}
if (qfi & QueueSupportFlagBits::ePresent)
{
sb << "Present | ";
}
auto const sbv = sb.view();
return std::string(sbv.substr(0, sbv.size() - 3));
}
struct QueueFamilyInfo
{
u32 m_Index;
@@ -30,6 +57,12 @@ struct QueueFamilyInfo
QueueSupportFlags m_Support;
};
inline std::string
format_as(QueueFamilyInfo const &qfi)
{
return fmt::format("Queue {}: Count={} Support={}", qfi.m_Index, qfi.m_Count, qfi.m_Support);
}
[[nodiscard]] vk::SurfaceCapabilitiesKHR
GetSurfaceCapabilities(vk::PhysicalDevice physicalDevice, vk::SurfaceKHR surface);
@@ -48,11 +81,12 @@ struct PhysicalDevice final
eastl::vector<vk::PresentModeKHR> m_PresentModes;
eastl::vector<QueueFamilyInfo> m_QueueFamilies;
PhysicalDevice() = default;
PhysicalDevice(vk::SurfaceKHR surface, vk::PhysicalDevice physicalDevice);
};
class PhysicalDevices : public eastl::fixed_vector<PhysicalDevice, 4>
{
public:
PhysicalDevices(const Surface *surface, const Context *context);
PhysicalDevices(Surface const &surface, Instance const &context);
};
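
The two format_as overloads added above use fmt's ADL extension point: a free function format_as(T) in T's namespace makes "{}" formatting work without writing a fmt::formatter specialization. A minimal sketch, assuming libfmt 9+ is available (as it is in this project); the type and fields below are illustrative:

#include <fmt/core.h>
#include <fmt/format.h>
#include <string>

namespace demo
{
struct QueueFamilyInfo
{
    unsigned index;
    unsigned count;
};

// Found by argument-dependent lookup; fmt formats the returned string instead.
inline std::string format_as(QueueFamilyInfo const &qfi)
{
    return fmt::format("Queue {}: Count={}", qfi.index, qfi.count);
}
} // namespace demo

int main()
{
    fmt::print("{}\n", demo::QueueFamilyInfo{0, 4}); // prints: Queue 0: Count=4
}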

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: pipeline.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -13,12 +13,45 @@ struct Device;
struct Pipeline
{
const Device *m_Device;
vk::PipelineLayout m_Layout;
vk::Pipeline m_Pipeline;
eastl::vector<vk::DescriptorSetLayout> m_SetLayouts;
enum class Kind
{
eGraphics,
eCompute,
};
Pipeline(const Device *device, vk::PipelineLayout layout, vk::Pipeline pipeline,
eastl::vector<vk::DescriptorSetLayout> &&setLayouts);
Device const *m_Device = nullptr;
vk::PipelineLayout m_Layout;
vk::Pipeline m_Pipeline = nullptr;
eastl::vector<vk::DescriptorSetLayout> m_SetLayouts;
Kind m_Kind;
Pipeline() = default;
Pipeline(Device const *device, vk::PipelineLayout layout, vk::Pipeline pipeline,
eastl::vector<vk::DescriptorSetLayout> &&setLayouts, Kind kind);
~Pipeline();
DISALLOW_COPY_AND_ASSIGN(Pipeline);
Pipeline(Pipeline &&other) noexcept
: m_Device{other.m_Device}
, m_Layout{Take(other.m_Layout)}
, m_Pipeline{Take(other.m_Pipeline)}
, m_SetLayouts{std::move(other.m_SetLayouts)}
, m_Kind{other.m_Kind}
{
}
Pipeline &
operator=(Pipeline &&other) noexcept
{
if (this == &other)
return *this;
using eastl::swap;
swap(m_Device, other.m_Device);
swap(m_Layout, other.m_Layout);
swap(m_Pipeline, other.m_Pipeline);
swap(m_SetLayouts, other.m_SetLayouts);
swap(m_Kind, other.m_Kind);
return *this;
}
};

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: queue_allocation.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once

View File

@@ -0,0 +1,32 @@
// =============================================
// Aster: sampler.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
struct Device;
struct Sampler final
{
Device const *m_Device = nullptr;
vk::Sampler m_Sampler = nullptr;
[[nodiscard]] bool
IsValid() const
{
return m_Sampler;
}
// Constructors
Sampler(Device const *device, vk::SamplerCreateInfo const &samplerCreateInfo, cstr name);
~Sampler();
Sampler(Sampler &&other) noexcept;
Sampler &operator=(Sampler &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(Sampler);
};

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: size.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -9,10 +9,33 @@
struct Size2D
{
u32 m_Width;
u32 m_Height;
u32 m_Width = 0;
u32 m_Height = 0;
Size2D() = default;
Size2D(u32 const width, u32 const height)
: m_Width{width}
, m_Height{height}
{
}
Size2D(vk::Extent2D const extent)
: m_Width{extent.width}
, m_Height{extent.height}
{
}
Size2D &
operator=(vk::Extent2D const other)
{
m_Height = other.height;
m_Width = other.width;
return *this;
}
bool operator==(Size2D const &) const = default;
explicit
operator vk::Extent2D() const
{
return {m_Width, m_Height};
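
Note the asymmetry the new Size2D introduces: construction from vk::Extent2D is implicit, but conversion back is explicit. A stand-alone sketch of that design, with a plain struct standing in for vk::Extent2D:

#include <cstdint>

struct Extent2D { std::uint32_t width, height; };

struct Size2D
{
    std::uint32_t m_Width = 0, m_Height = 0;

    Size2D() = default;
    Size2D(Extent2D const e) : m_Width{e.width}, m_Height{e.height} {} // implicit

    explicit operator Extent2D() const { return {m_Width, m_Height}; }
};

int main()
{
    Size2D s = Extent2D{800, 600};     // implicit: extents freely become sizes
    auto e = static_cast<Extent2D>(s); // going back must be spelled out
    return e.width == 800 ? 0 : 1;
}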

View File

@@ -1,23 +1,24 @@
// =============================================
// Aster: surface.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
struct Context;
struct Instance;
struct Window;
struct Surface
{
Context *m_Context;
Instance *m_Context;
vk::SurfaceKHR m_Surface;
NameString m_Name;
// Ctor Dtor
Surface(Context *context, const Window *window, cstr name);
Surface() = default;
Surface(Instance &context, Window const &window);
~Surface();
// Move

View File

@@ -1,6 +1,6 @@
/// =============================================
// Aster: swapchain.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// ==============================================
#pragma once
@@ -19,9 +19,8 @@ struct Swapchain final
{
using FnResizeCallback = eastl::function<void(vk::Extent2D)>;
const Device *m_Device;
Device const *m_Device;
vk::SwapchainKHR m_Swapchain;
NameString m_Name;
vk::Extent2D m_Extent;
vk::Format m_Format;
eastl::fixed_vector<vk::Image, 4> m_Images;
@@ -29,11 +28,12 @@
eastl::vector<FnResizeCallback> m_ResizeCallbacks;
void Create(const Surface *window, Size2D size);
void Create(Surface const &surface, Size2D size);
void RegisterResizeCallback(FnResizeCallback &&callback);
// Ctor/Dtor
Swapchain(const Surface *window, const Device *device, Size2D size, NameString &&name);
Swapchain() = default;
Swapchain(Surface const &surface, Device const &device, Size2D size);
~Swapchain();
// Move
@@ -42,6 +42,6 @@
DISALLOW_COPY_AND_ASSIGN(Swapchain);
private:
private:
void Cleanup();
};

View File

@ -1,36 +1,28 @@
// =============================================
// Aster: type_traits.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "constants.h"
struct Device;
struct Image;
namespace concepts
{
template <typename T>
concept DeviceDestructible = requires(T a, Device *p) {
{ a.Destroy(p) } -> std::convertible_to<void>;
concept Deref = requires(T a) {
{ *a };
};
template <typename T>
concept Committable = requires(T a, bool v) {
{ a.IsCommitted() } -> std::convertible_to<bool>;
{ a.SetCommitted(v) } -> std::convertible_to<void>;
template <typename TRef, typename TVal>
concept DerefTo = requires(TRef a) {
{ *a } -> std::convertible_to<TVal>;
};
template <typename T>
constexpr bool GpuResource = false;
template <typename T>
concept RenderResource = GpuResource<T> and std::is_default_constructible_v<T> and std::is_trivially_copyable_v<T> and
DeviceDestructible<T> and Committable<T>;
template <typename T>
constexpr bool IsHandle = false;
template <typename THandle>
concept HandleType = IsHandle<THandle> and RenderResource<typename THandle::Type>;
template <Deref T>
using DerefType = std::remove_cvref_t<decltype(*std::declval<T>())>;
} // namespace concepts
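
The old GpuResource/Committable machinery is gone; what remains is a generic dereference concept plus a pointee-type alias, which the buffer and image ref concepts build on. A compilable sketch of the two utilities:

#include <memory>
#include <type_traits>
#include <utility>

namespace concepts
{
// Anything dereferenceable: raw pointers, smart pointers, iterators.
template <typename T>
concept Deref = requires(T a) {
    { *a };
};

// Recovers the pointee type, stripped of cv-qualifiers and references.
template <Deref T>
using DerefType = std::remove_cvref_t<decltype(*std::declval<T>())>;
} // namespace concepts

static_assert(concepts::Deref<int *>);
static_assert(concepts::Deref<std::shared_ptr<float>>);
static_assert(!concepts::Deref<int>);
static_assert(std::is_same_v<concepts::DerefType<std::unique_ptr<int const>>, int>);

int main() {}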

View File

@@ -1,6 +1,6 @@
// =============================================
// Aster: window.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -20,6 +20,8 @@ struct Window final
static std::atomic_uint64_t m_WindowCount;
static std::atomic_bool m_IsGlfwInit;
static void SetupLibrary();
static cstr *GetInstanceExtensions(u32 *extensionCount);
// Methods
[[nodiscard]] bool
@@ -30,7 +32,7 @@ struct Window final
}
void RequestExit() const noexcept;
void SetWindowSize(const vk::Extent2D &extent) const noexcept;
void SetWindowSize(vk::Extent2D const &extent) const noexcept;
void SetWindowSize(u32 width, u32 height) const noexcept;
/// Actual size of the framebuffer being used for the window render.
[[nodiscard]] Size2D GetSize() const;

View File

@@ -4,7 +4,7 @@ cmake_minimum_required(VERSION 3.13)
target_sources(aster_core
INTERFACE
"manager.h"
"buffer_manager.h"
"image_manager.h"
"render_resource_manager.h")
"rendering_device.h"
"resource.h"
"context.h"
"commit_manager.h")

View File

@@ -1,24 +0,0 @@
// =============================================
// Aster: buffer_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/buffer.h"
#include "manager.h"
namespace systems
{
using BufferHandle = Handle<Buffer>;
class BufferManager final : public Manager<Buffer>
{
public:
BufferManager(const Device *device, const u32 maxCount, const u8 binding);
[[nodiscard]] Handle CreateStorageBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Handle CreateUniformBuffer(usize size, cstr name = nullptr);
};
} // namespace systems

View File

@@ -0,0 +1,375 @@
// =============================================
// Aster: render_resource_manager.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/util/freelist.h"
#include "EASTL/deque.h"
#include "EASTL/intrusive_hash_map.h"
#include "EASTL/vector.h"
#include "aster/core/buffer.h"
#include "aster/core/image_view.h"
#include "aster/core/sampler.h"
#include "resource.h"
namespace systems
{
class RenderingDevice;
class CommitManager
{
template <typename T>
struct HandleMapper
{
using Type = T;
using Handle = Ref<Type>;
using Resource = ResId<Type>;
struct Entry : eastl::intrusive_hash_node_key<Handle>
{
std::atomic<u32> m_CommitCount;
void
AddRef()
{
auto const rc = ++m_CommitCount;
assert(rc > 0);
}
void
Release()
{
auto const rc = --m_CommitCount;
assert(rc < MaxValue<u32>);
}
u32
IsReferenced() const
{
return m_CommitCount;
}
bool
operator==(Entry const &other) const
{
return this->mKey == other.mKey;
}
Entry *
Next()
{
return reinterpret_cast<Entry *>(this->mpNext);
}
void
SetNext(Entry &entry)
{
this->mpNext = &entry;
}
struct Hash
{
usize
operator()(Handle const &e)
{
return eastl::hash<Type *>()(e.get());
}
};
};
eastl::vector<Entry> m_Data;
FreeList<Entry> m_FreeList;
eastl::intrusive_hash_map<typename Entry::key_type, Entry, 31, typename Entry::Hash> m_InUse;
std::array<FreeList<Entry>, 4> m_ToDelete;
u8 m_ToDeleteIndex = 0;
explicit HandleMapper(u32 const maxCount)
: m_Data{maxCount}
{
// Setup freelist
for (auto it = m_Data.rbegin(); it != m_Data.rend(); ++it)
{
m_FreeList.Push(*it);
}
}
~HandleMapper()
{
for (auto &toDelete : m_ToDelete)
{
ClearEntries(toDelete);
}
}
PIN_MEMORY(HandleMapper);
/// Returns a commit, and a bool signifying if it is a new commit.
std::tuple<Resource, bool>
Create(Handle const &object)
{
// Get-from freelist
assert(!m_FreeList.Empty());
auto it = m_InUse.find(object);
if (it != m_InUse.end())
{
it->AddRef();
auto i = GetIndex(*it);
return {Resource{i}, false};
}
Entry &data = m_FreeList.Pop();
data.mKey = object;
data.m_CommitCount = 0;
m_InUse.insert(data);
auto i = GetIndex(data);
return {Resource{i}, true};
}
Handle
GetHandle(Resource const &res)
{
return m_Data[res.m_Index].mKey;
}
void
AddRef(Resource const &commit)
{
m_Data.at(commit.m_Index).AddRef();
}
void
Release(Resource const &commit)
{
auto &entry = m_Data.at(commit.m_Index);
entry.Release();
if (!entry.IsReferenced())
{
QueueDelete(entry);
}
}
/**
* Sweeps through the delete queue.
* All freed items are cleared. (With a 3 frame delay)
*/
void
Update()
{
m_ToDeleteIndex = (m_ToDeleteIndex + 1) % m_ToDelete.size();
auto &list = m_ToDelete[m_ToDeleteIndex];
ClearEntries(list);
}
private:
u32
GetIndex(Entry const &entry)
{
return static_cast<u32>(&entry - m_Data.begin());
}
void
QueueDelete(Entry &entry)
{
m_InUse.remove(entry);
m_ToDelete[m_ToDeleteIndex].Push(entry);
}
void
ClearEntries(FreeList<Entry> &entries)
{
while (!entries.Empty())
{
Entry &entry = entries.Pop();
entry.mKey.reset();
entry.m_CommitCount = 0;
}
}
};
union WriteInfo {
vk::DescriptorBufferInfo uBufferInfo;
vk::DescriptorImageInfo uImageInfo;
vk::BufferView uBufferView;
explicit WriteInfo(vk::DescriptorBufferInfo const &info);
explicit WriteInfo(vk::DescriptorImageInfo const &info);
explicit WriteInfo(vk::BufferView const &info);
};
using WriteCommand = vk::WriteDescriptorSet;
// using WriteOwner = std::variant<Handle<Buffer>, Handle<Image>>;
public:
RenderingDevice const *m_Device;
CommitManager(RenderingDevice const *device, u32 maxBuffers, u32 maxImages, u32 maxStorageImages,
Ref<Sampler> defaultSampler);
~CommitManager();
PIN_MEMORY(CommitManager);
// Commit Buffer
private:
ResId<Buffer> CommitBuffer(Ref<Buffer> const &buffer);
public:
// Commit Storage Images
ResId<StorageImageView>
CommitStorageImage(concepts::ViewRefTo<StorageImage> auto const &image)
{
return CommitStorageImage(CastView<StorageImageView>(image));
}
ResId<StorageImageView> CommitStorageImage(Ref<StorageImageView> const &image);
// Sampled Images
ResId<TextureView>
CommitTexture(concepts::ViewRefTo<Texture> auto const &image, Ref<Sampler> const &sampler)
{
return CommitTexture(CastView<TextureView>(image), sampler);
}
ResId<TextureView>
CommitTexture(concepts::ViewRefTo<Texture> auto const &image)
{
return CommitTexture(CastView<TextureView>(image));
}
ResId<TextureView> CommitTexture(Ref<TextureView> const &handle);
ResId<TextureView> CommitTexture(Ref<TextureView> const &image, Ref<Sampler> const &sampler);
void Update();
Ref<Buffer>
FetchHandle(ResId<Buffer> const &id)
{
return m_Buffers.GetHandle(id);
}
Ref<TextureView>
FetchHandle(ResId<TextureView> const &id)
{
return m_Images.GetHandle(id);
}
Ref<StorageImageView>
FetchHandle(ResId<StorageImageView> const &id)
{
return m_StorageImages.GetHandle(id);
}
[[nodiscard]] vk::DescriptorSetLayout const &
GetDescriptorSetLayout() const
{
return m_SetLayout;
}
[[nodiscard]] vk::DescriptorSet const &
GetDescriptorSet() const
{
return m_DescriptorSet;
}
static CommitManager &
Instance()
{
assert(m_Instance);
return *m_Instance;
}
static bool
IsInit()
{
return static_cast<bool>(m_Instance);
}
private:
vk::DescriptorPool m_DescriptorPool;
vk::DescriptorSetLayout m_SetLayout;
vk::DescriptorSet m_DescriptorSet;
constexpr static u8 BUFFER_BINDING_INDEX = 0x0;
constexpr static u8 IMAGE_BINDING_INDEX = 0x1;
constexpr static u8 STORAGE_IMAGE_BINDING_INDEX = 0x2;
HandleMapper<Buffer> m_Buffers;
HandleMapper<TextureView> m_Images;
HandleMapper<StorageImageView> m_StorageImages;
Ref<Sampler> m_DefaultSampler;
eastl::vector<vk::WriteDescriptorSet> m_Writes;
eastl::deque<WriteInfo> m_WriteInfos;
// eastl::vector<WriteOwner> m_WriteOwner;
static CommitManager *m_Instance;
friend ResId<Buffer>;
friend ResId<TextureView>;
friend ResId<StorageImageView>;
void
AddRef(ResId<Buffer> const &handle)
{
m_Buffers.AddRef(handle);
}
void
AddRef(ResId<TextureView> const &handle)
{
m_Images.AddRef(handle);
}
void
AddRef(ResId<StorageImageView> const &handle)
{
m_StorageImages.AddRef(handle);
}
void
Release(ResId<Buffer> const &handle)
{
m_Buffers.Release(handle);
}
void
Release(ResId<TextureView> const &handle)
{
m_Images.Release(handle);
}
void
Release(ResId<StorageImageView> const &handle)
{
m_StorageImages.Release(handle);
}
};
template <typename T>
void
ResId<T>::AddRef() const
{
if (m_Index != INVALID)
CommitManager::Instance().AddRef(*this);
}
template <typename T>
void
ResId<T>::Release() const
{
if (m_Index != INVALID)
CommitManager::Instance().Release(*this);
}
} // namespace systems
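
The comment in CommitManager::Update ("with a 3 frame delay") describes a rotating-bucket deferred deleter: a release lands in the current bucket, and a bucket is only reclaimed when the rotation comes back around, giving in-flight frames time to finish with the resource. A simplified sketch of that idea, with ints standing in for the real hash-map entries:

#include <array>
#include <cstddef>
#include <iostream>
#include <vector>

struct DeferredDeleter
{
    std::array<std::vector<int>, 4> m_ToDelete; // 4 buckets => ~3-frame delay
    std::size_t m_Index = 0;

    void QueueDelete(int id) { m_ToDelete[m_Index].push_back(id); }

    void Update() // called once per frame
    {
        // Advance to the oldest bucket and reclaim everything in it.
        m_Index = (m_Index + 1) % m_ToDelete.size();
        for (int id : m_ToDelete[m_Index])
            std::cout << "reclaimed " << id << '\n';
        m_ToDelete[m_Index].clear();
    }
};

int main()
{
    DeferredDeleter d;
    d.QueueDelete(7);
    for (int frame = 0; frame < 4; ++frame)
        d.Update(); // id 7 is reclaimed on the 4th Update, not immediately
}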

View File

@@ -0,0 +1,471 @@
// =============================================
// Aster: context_pool.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "EASTL/span.h"
#include "context.h"
#include <aster/aster.h>
#include <aster/core/buffer.h>
#include <aster/core/image.h>
#include <aster/core/image_view.h>
#include <aster/core/physical_device.h>
#include <aster/core/pipeline.h>
#include <EASTL/intrusive_list.h>
#include <EASTL/optional.h>
#include <EASTL/vector.h>
#include <foonathan/memory/memory_pool.hpp>
#include <foonathan/memory/namespace_alias.hpp>
namespace systems
{
class RenderingDevice;
struct Frame;
namespace _internal
{
class ComputeContextPool;
class GraphicsContextPool;
class TransferContextPool;
class ContextPool;
} // namespace _internal
#define DEPRECATE_RAW_CALLS
class Context
{
protected:
_internal::ContextPool *m_Pool;
vk::CommandBuffer m_Cmd;
friend RenderingDevice;
friend _internal::ContextPool;
explicit Context(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
: m_Pool{&pool}
, m_Cmd{cmd}
{
}
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<Buffer> const &buffer);
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<Image> const &image);
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<ImageView> const &view);
public:
DEPRECATE_RAW_CALLS void Dependency(vk::DependencyInfo const &dependencyInfo);
void Begin();
void End();
void BeginDebugRegion(cstr name, vec4 color = {});
void EndDebugRegion();
};
// Inline the no-op if not debug.
#if defined(ASTER_NDEBUG)
inline void
Context::BeginDebugRegion(cstr name, vec4 color)
{
}
inline void
Context::EndDebugRegion()
{
}
#endif
class TransferContext : public Context
{
protected:
friend RenderingDevice;
friend _internal::TransferContextPool;
explicit TransferContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
: Context{pool, cmd}
{
}
void UploadBuffer(Ref<Buffer> const &buffer, usize size, void const *data);
public:
void UploadTexture(Ref<Image> const &image, eastl::span<u8> const &data);
void
UploadBuffer(Ref<Buffer> const &buffer, std::ranges::range auto const &data)
{
auto const span = eastl::span{data.begin(), data.end()};
UploadBuffer(buffer, span.size_bytes(), span.data());
}
DEPRECATE_RAW_CALLS void Blit(vk::BlitImageInfo2 const &mipBlitInfo);
TransferContext(TransferContext &&other) noexcept;
TransferContext &operator=(TransferContext &&other) noexcept;
~TransferContext() = default;
DISALLOW_COPY_AND_ASSIGN(TransferContext);
};
class ComputeContext : public TransferContext
{
protected:
friend RenderingDevice;
friend _internal::ComputeContextPool;
Pipeline const *m_PipelineInUse;
explicit ComputeContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
: TransferContext{pool, cmd}
, m_PipelineInUse{nullptr}
{
}
void PushConstantBlock(usize offset, usize size, void const *data);
void Dispatch(Pipeline const &pipeline, u32 x, u32 y, u32 z, usize size, void *data);
public:
void BindPipeline(Pipeline const &pipeline);
void
PushConstantBlock(auto const &block)
{
if constexpr (sizeof block > 128)
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof block);
PushConstantBlock(0, sizeof block, &block);
}
void
PushConstantBlock(usize const offset, auto const &block)
{
if (offset + sizeof block > 128)
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}, at offset {}", sizeof block,
offset);
PushConstantBlock(offset, sizeof block, &block);
}
void
Dispatch(Pipeline const &pipeline, u32 const x, u32 const y, u32 const z, auto &pushConstantBlock)
{
if constexpr (sizeof pushConstantBlock > 128)
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof pushConstantBlock);
Dispatch(pipeline, x, y, z, sizeof pushConstantBlock, &pushConstantBlock);
}
};
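// A minimal usage sketch (editorial, names hypothetical) of the
// push-constant helpers above, assuming a compute Pipeline
// `blurPipeline` and a ComputeContext `ctx` obtained elsewhere:
//
//   struct BlurParams
//   {
//       u64 srcBufferAddress; // e.g. from Buffer::GetDeviceAddress()
//       u64 dstBufferAddress;
//       u32 radius;
//       u32 pad_;
//   };
//   static_assert(sizeof(BlurParams) <= 128, "stay inside the guaranteed PCB range");
//
//   BlurParams params{};
//   ctx.Dispatch(blurPipeline, groupsX, groupsY, 1, params);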
class GraphicsContext : public ComputeContext
{
protected:
friend RenderingDevice;
friend _internal::GraphicsContextPool;
explicit GraphicsContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
: ComputeContext{pool, cmd}
{
}
public:
DEPRECATE_RAW_CALLS void SetViewport(vk::Viewport const &viewport);
void BindVertexBuffer(Ref<VertexBuffer> const &vertexBuffer);
void BindIndexBuffer(Ref<IndexBuffer> const &indexBuffer);
void Draw(usize vertexCount);
void DrawIndexed(usize indexCount);
void DrawIndexed(usize indexCount, usize firstIndex, usize firstVertex);
DEPRECATE_RAW_CALLS void BeginRendering(vk::RenderingInfo const &renderingInfo);
void EndRendering();
DEPRECATE_RAW_CALLS vk::CommandBuffer
GetCommandBuffer() const
{
return m_Cmd;
}
};
namespace _internal
{
class ContextPool
{
protected:
RenderingDevice *m_Device;
vk::CommandPool m_Pool;
eastl::vector<vk::CommandBuffer> m_CommandBuffers;
u32 m_BuffersAllocated;
public:
u16 m_ExtraData;
enum class ManagedBy : u8
{
eFrame,
eDevice,
} m_ManagedBy;
protected:
eastl::vector<Ref<Buffer>> m_OwnedBuffers;
eastl::vector<Ref<Image>> m_OwnedImages;
eastl::vector<Ref<ImageView>> m_OwnedImageViews;
vk::CommandBuffer AllocateCommandBuffer();
public:
[[nodiscard]] RenderingDevice &
GetDevice() const
{
assert(m_Device);
return *m_Device;
}
eastl::function<void(ContextPool &)> m_ResetCallback;
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<Buffer> const &buffer);
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<Image> const &image);
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<ImageView> const &view);
Context CreateContext();
void Reset();
ContextPool() = default;
ContextPool(RenderingDevice &device, u32 queueFamilyIndex, ManagedBy managedBy);
ContextPool(ContextPool &&other) noexcept;
ContextPool &operator=(ContextPool &&other) noexcept;
bool
operator==(ContextPool const &other) const
{
return m_Pool == other.m_Pool;
}
~ContextPool();
DISALLOW_COPY_AND_ASSIGN(ContextPool);
};
class TransferContextPool : public ContextPool
{
public:
TransferContext CreateTransferContext();
TransferContextPool() = default;
TransferContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
: ContextPool{device, queueFamilyIndex, managedBy}
{
}
TransferContextPool(TransferContextPool &&other) noexcept = default;
TransferContextPool &operator=(TransferContextPool &&other) noexcept = default;
~TransferContextPool() = default;
DISALLOW_COPY_AND_ASSIGN(TransferContextPool);
};
class ComputeContextPool : public TransferContextPool
{
public:
ComputeContext CreateComputeContext();
ComputeContextPool() = default;
ComputeContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
: TransferContextPool{device, queueFamilyIndex, managedBy}
{
}
ComputeContextPool(ComputeContextPool &&other) noexcept = default;
ComputeContextPool &operator=(ComputeContextPool &&other) noexcept = default;
~ComputeContextPool() = default;
DISALLOW_COPY_AND_ASSIGN(ComputeContextPool);
};
class GraphicsContextPool : public ComputeContextPool
{
public:
GraphicsContext CreateGraphicsContext();
GraphicsContextPool() = default;
GraphicsContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
: ComputeContextPool{device, queueFamilyIndex, managedBy}
{
}
GraphicsContextPool(GraphicsContextPool &&other) noexcept = default;
GraphicsContextPool &operator=(GraphicsContextPool &&other) noexcept = default;
~GraphicsContextPool() = default;
DISALLOW_COPY_AND_ASSIGN(GraphicsContextPool);
};
template <std::derived_from<ContextPool> TContextPool>
class OrderlessContextPool
{
using ContextPoolType = TContextPool;
struct ContextListEntry : eastl::intrusive_list_node
{
ContextPoolType m_Pool;
bool
Contains(ContextPool const &other) const
{
return m_Pool == other;
}
};
using ContextListType = eastl::intrusive_list<ContextListEntry>;
RenderingDevice *m_Device;
memory::memory_pool<> m_ContextPoolEntryMemory;
ContextListType m_FreeContextPools;
ContextListType m_UsedContextPools;
u32 m_QueueFamilyIndex;
constexpr static usize ENTRY_SIZE = sizeof(ContextListEntry);
constexpr static usize ENTRIES_PER_BLOCK = 5;
constexpr static usize BLOCK_SIZE = ENTRIES_PER_BLOCK * ENTRY_SIZE;
public:
OrderlessContextPool()
: m_Device{nullptr}
, m_ContextPoolEntryMemory{ENTRY_SIZE, BLOCK_SIZE}
, m_QueueFamilyIndex{0}
{
}
void
Init(RenderingDevice &device, u32 const queueFamilyIndex)
{
m_Device = &device;
m_QueueFamilyIndex = queueFamilyIndex;
}
TransferContext
CreateTransferContext()
requires std::derived_from<TContextPool, TransferContextPool>
{
if (!m_FreeContextPools.empty())
{
ContextListEntry &entry = m_FreeContextPools.back();
m_FreeContextPools.pop_back();
m_UsedContextPools.push_back(entry);
return entry.m_Pool.CreateTransferContext();
}
ContextListEntry &entry = *static_cast<ContextListEntry *>(m_ContextPoolEntryMemory.allocate_node());
auto pool = ContextPoolType{*m_Device, m_QueueFamilyIndex, ContextPool::ManagedBy::eDevice};
pool.m_ResetCallback = [this](ContextPool &resetPool) { this->ReleasePool(resetPool); };
new (&entry) ContextListEntry{
.m_Pool = eastl::move(pool),
};
m_UsedContextPools.push_back(entry);
return entry.m_Pool.CreateTransferContext();
}
ComputeContext
CreateComputeContext()
requires std::derived_from<TContextPool, ComputeContextPool>
{
if (!m_FreeContextPools.empty())
{
ContextListEntry &entry = m_FreeContextPools.back();
m_FreeContextPools.pop_back();
m_UsedContextPools.push_back(entry);
return entry.m_Pool.CreateComputeContext();
}
ContextListEntry &entry = *static_cast<ContextListEntry *>(m_ContextPoolEntryMemory.allocate_node());
auto pool = ContextPoolType{*m_Device, m_QueueFamilyIndex, ContextPool::ManagedBy::eDevice};
pool.m_ResetCallback = [this](ContextPool &resetPool) { this->ReleasePool(resetPool); };
new (&entry) ContextListEntry{
.m_Pool = eastl::move(pool),
};
m_UsedContextPools.push_back(entry);
return entry.m_Pool.CreateComputeContext();
}
void
ReleasePool(ContextPool &pool)
{
auto const found = eastl::find_if(m_UsedContextPools.begin(), m_UsedContextPools.end(),
[&pool](ContextListEntry const &v) { return v.Contains(pool); });
assert(found != m_UsedContextPools.end()); // A pool must only be released through its own callback.
auto &v = *found;
ContextListType::remove(v);
pool.Reset();
m_FreeContextPools.push_back(v);
}
OrderlessContextPool(OrderlessContextPool &&other) noexcept
: m_Device{other.m_Device}
, m_ContextPoolEntryMemory{std::move(other.m_ContextPoolEntryMemory)}
, m_QueueFamilyIndex{other.m_QueueFamilyIndex}
{
// eastl::intrusive_list's copy operations deliberately ignore their source,
// so copy-initializing the lists would silently drop every entry; swap instead.
m_FreeContextPools.swap(other.m_FreeContextPools);
m_UsedContextPools.swap(other.m_UsedContextPools);
}
OrderlessContextPool &
operator=(OrderlessContextPool &&other) noexcept
{
if (this == &other)
return *this;
m_Device = other.m_Device;
m_ContextPoolEntryMemory = std::move(other.m_ContextPoolEntryMemory);
m_FreeContextPools.swap(other.m_FreeContextPools);
other.m_FreeContextPools.clear();
m_UsedContextPools.swap(other.m_UsedContextPools);
other.m_UsedContextPools.clear();
m_QueueFamilyIndex = other.m_QueueFamilyIndex;
return *this;
}
~OrderlessContextPool()
{
for (auto &entry : m_FreeContextPools)
{
entry.m_Pool.~ContextPoolType();
}
for (auto &entry : m_UsedContextPools)
{
entry.m_Pool.~ContextPoolType();
}
// The allocations will 'wink' away.
}
DISALLOW_COPY_AND_ASSIGN(OrderlessContextPool);
};
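// Flow in brief: Create*Context pops a free pool (or carves a new entry out of
// the block allocator) and moves it to the used list; the m_ResetCallback wired
// in above routes the pool back through ReleasePool once its submission is
// done. A hypothetical driver of that cycle:
//
//   OrderlessTransferContextPool pools;
//   pools.Init(device, transferQueueFamily);
//   TransferContext ctx = pools.CreateTransferContext();
//   // ... record, submit; the reset callback recycles the pool afterwards.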
using OrderlessTransferContextPool = OrderlessContextPool<TransferContextPool>;
using OrderlessComputeContextPool = OrderlessContextPool<ComputeContextPool>;
} // namespace _internal
} // namespace systems


@@ -1,60 +0,0 @@
// =============================================
// Aster: image_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/image.h"
#include "manager.h"
namespace systems
{
struct Texture2DCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
bool m_IsSampled = true;
bool m_IsMipMapped = false;
bool m_IsStorage = false;
};
struct TextureCubeCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
u32 m_Side = 0;
cstr m_Name = nullptr;
bool m_IsSampled = true;
bool m_IsMipMapped = false;
bool m_IsStorage = false;
};
struct AttachmentCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
};
struct DepthStencilImageCreateInfo
{
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
};
using ImageHandle = Handle<Image>;
class ImageManager final : public Manager<Image>
{
public:
ImageManager(const Device *device, const u32 maxCount, const u8 binding);
[[nodiscard]] Handle CreateTexture2D(const Texture2DCreateInfo &createInfo);
[[nodiscard]] Handle CreateTextureCube(const TextureCubeCreateInfo &createInfo);
[[nodiscard]] Handle CreateAttachment(const AttachmentCreateInfo &createInfo);
[[nodiscard]] Handle CreateDepthStencilImage(const DepthStencilImageCreateInfo &createInfo);
};
} // namespace systems


@@ -1,361 +0,0 @@
// =============================================
// Aster: manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/type_traits.h"
struct Device;
template <concepts::RenderResource T>
class Handle;
template <concepts::RenderResource T>
class Manager
{
friend Handle<T>;
public:
using Type = T;
using Handle = Handle<Type>;
static_assert(sizeof(Handle) == sizeof(u32));
constexpr static u32 MAX_HANDLES = Handle::INDEX_MASK + 1;
/**
* Constructor for the Manager class template.
* @param device Device with which resources are created.
* @param maxCount Max number of resources that can be created (maxCount <= Handle::INDEX_MASK)
* @param binding The shader binding at which this manager will bind its resources.
*/
explicit Manager(const Device *device, const u32 maxCount, const u8 binding)
: m_MaxCount{maxCount}
, m_Binding{binding}
, m_Device{device}
{
assert(!m_Instance);
assert(maxCount <= MAX_HANDLES);
m_Data = new Type[m_MaxCount];
m_RefCount = new std::atomic<u32>[m_MaxCount];
for (u32 i = 0; i < m_MaxCount; ++i)
{
*Recast<u32 *>(&m_Data[i]) = (i + 1);
}
m_Instance = this;
}
virtual ~Manager()
{
if (!m_Data)
return;
for (u32 i = 0; i < m_MaxCount; ++i)
{
m_Data[i].Destroy(m_Device);
}
delete[] m_Data;
delete[] m_RefCount;
m_Data = nullptr;
m_RefCount = nullptr;
m_MaxCount = 0;
m_FreeHead = 0;
m_Device = nullptr;
m_Instance = nullptr;
}
/**
* @warning only to be used internally.
* @return The only constructed instance of this manager.
*/
static Manager *
Instance()
{
assert(m_Instance);
return m_Instance;
}
PIN_MEMORY(Manager);
private:
Type *m_Data = nullptr; // Data slots double as the freelist while not in use.
std::atomic<u32> *m_RefCount = nullptr; // Associated reference count for each of the instances in Data.
u32 m_MaxCount = 0; // Max number of resources supported.
u32 m_FreeHead = 0;
u8 m_Binding = 0;
static Manager *m_Instance;
/**
* User is expected to type-check.
* @param index Actual index of the resource in the m_Data array. Not type checked.
*/
void
AddRef(const u32 index)
{
assert(index < m_MaxCount);
++m_RefCount[index];
}
/**
* User is expected to type-check.
* @param index Actual index of the resource in the m_Data array. Not type checked.
*/
void
Release(const u32 index)
{
assert(index < m_MaxCount);
const u32 rc = --m_RefCount[index];
assert(rc != MaxValue<u32>);
if (rc == 0)
{
// TODO: Don't destroy here. Separate out to a cleanup routine.
m_Data[index].Destroy(m_Device);
}
}
/**
* User is expected to type-check.
* @param index Actual index of the resource in the m_Data array. Not type checked.
* @return Pointer to the resource at the index.
*/
Type *
Fetch(const u32 index)
{
assert(index < m_MaxCount);
return &m_Data[index];
}
protected:
const Device *m_Device;
/**
* Internal Method to Allocate a resource on the manager.
* @return [Handle, Type*] Where Type* is available to initialize the resource.
*/
[[nodiscard]] std::pair<Handle, Type *>
Alloc()
{
ERROR_IF(m_FreeHead >= m_MaxCount, "Max resources allocated.") THEN_ABORT(-1);
const auto index = m_FreeHead;
Type *pAlloc = &m_Data[index];
m_FreeHead = *Recast<u32 *>(pAlloc);
return {Handle{index, m_Binding}, pAlloc};
}
};
template <concepts::RenderResource T>
class Ref
{
public:
using Type = T;
using Handle = Handle<Type>;
using Manager = Manager<Type>;
protected:
Handle m_Handle;
Type *m_Pointer = nullptr;
friend Handle;
void
InitPtr()
{
m_Pointer = m_Handle.Fetch();
}
public:
Type *
Get()
{
assert(m_Pointer);
return m_Pointer;
}
const Type *
Get() const
{
assert(m_Pointer);
return m_Pointer;
}
Type *
operator->()
{
return Get();
}
const Type *
operator->() const
{
return Get();
}
Type &
operator*()
{
return *Get();
}
const Type &
operator*() const
{
return *Get();
}
// Construction requires a valid handle.
explicit Ref(Handle &&handle)
: m_Handle{std::move(handle)}
{
InitPtr();
}
// Construction requires a valid handle.
explicit Ref(const Handle &handle)
: m_Handle{handle}
{
InitPtr();
}
Ref(const Ref &other) = default;
Ref(Ref &&other) noexcept = default;
Ref &operator=(const Ref &other) = default;
Ref &operator=(Ref &&other) noexcept = default;
~Ref() = default;
};
class RawHandle
{
protected:
constexpr static u32 INVALID_HANDLE = MaxValue<u32>;
constexpr static u32 INDEX_MASK = 0x0FFFFFFF;
constexpr static u32 TYPE_MASK = ~INDEX_MASK;
constexpr static u32 TYPE_OFFSET = GetMaskOffset(TYPE_MASK);
u32 m_Internal = INVALID_HANDLE;
RawHandle(const u32 index, const u8 typeId)
: m_Internal{(index & INDEX_MASK) | ((Cast<u32>(typeId) << TYPE_OFFSET) & TYPE_MASK)}
{
}
explicit RawHandle(const u32 internal)
: m_Internal{internal}
{
}
public:
[[nodiscard]] bool
IsValid() const
{
return m_Internal != INVALID_HANDLE;
}
[[nodiscard]] u32
GetIndex() const
{
return m_Internal & INDEX_MASK;
}
[[nodiscard]] u32
GetType() const
{
return (m_Internal & TYPE_MASK) >> TYPE_OFFSET;
}
bool
operator==(const RawHandle &other) const
{
return m_Internal == other.m_Internal;
}
};
template <concepts::RenderResource T>
class Handle : public RawHandle
{
public:
using Type = T;
using Manager = Manager<Type>;
protected:
// The only creating constructor; it requires a valid index and type.
Handle(const u32 index, const u8 typeId)
: RawHandle{index, typeId}
{
AddRef();
}
friend Manager;
friend Ref<T>;
public:
Handle(const Handle &other)
: RawHandle{other}
{
AddRef();
}
Handle(Handle &&other) noexcept
: RawHandle{std::exchange(other.m_Internal, INVALID_HANDLE)}
{
}
[[nodiscard]] Ref<T>
ToPointer()
{
return Ref{std::move(*this)};
}
[[nodiscard]] Type *
Fetch() const
{
// Strip the type bits; Manager::Fetch expects a raw index.
return Manager::Instance()->Fetch(GetIndex());
}
Handle &
operator=(const Handle &other)
{
if (this == &other)
return *this;
if (m_Internal != INVALID_HANDLE)
Release(); // Drop the old reference before taking on the new one.
m_Internal = other.m_Internal;
AddRef();
return *this;
}
Handle &
operator=(Handle &&other) noexcept
{
if (this == &other)
return *this;
std::swap(m_Internal, other.m_Internal);
return *this;
}
~Handle()
{
if (m_Internal != INVALID_HANDLE)
{
Release();
}
}
protected:
void
AddRef()
{
Manager::Instance()->AddRef(GetIndex());
}
void
Release()
{
Manager::Instance()->Release(GetIndex());
}
};


@@ -0,0 +1,75 @@
// =============================================
// Aster: pipeline_helpers.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include <aster/aster.h>
#include <EASTL/vector.h>
#include <slang.h>
#include <variant>
namespace systems
{
class RenderingDevice;
struct PipelineCreationError
{
std::variant<std::monostate, vk::Result, SlangResult> m_Data;
std::string What();
i32 Value();
operator bool() const;
PipelineCreationError(vk::Result res);
PipelineCreationError(SlangResult res);
PipelineCreationError();
};
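// A sketch of consuming this error type; `pipeline` and `createInfo` are
// placeholders, and operator bool is read here as "holds an error".
//
//   if (PipelineCreationError err = device.CreateGraphicsPipeline(pipeline, createInfo))
//   {
//       ERROR("Pipeline creation failed: {} ({})", err.What(), err.Value());
//   }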
vk::ShaderStageFlagBits SlangToVulkanShaderStage(SlangStage stage);
namespace _internal
{
struct PipelineLayoutBuilder
{
RenderingDevice *m_Device;
eastl::vector<vk::DescriptorSetLayout> m_DescriptorSetLayouts;
eastl::vector<vk::PushConstantRange> m_PushConstants;
vk::ShaderStageFlags m_Stage;
explicit PipelineLayoutBuilder(RenderingDevice *device, vk::DescriptorSetLayout bindlessLayout = {});
[[nodiscard]] vk::PipelineLayout Build();
[[nodiscard]] vk::DescriptorSetLayout
CreateDescriptorSetLayout(vk::DescriptorSetLayoutCreateInfo const &createInfo) const;
void AddDescriptorSetForParameterBlock(slang::TypeLayoutReflection *layout);
void AddPushConstantRangeForConstantBuffer(slang::TypeLayoutReflection *layout);
void AddSubObjectRange(slang::TypeLayoutReflection *layout, i64 subObjectRangeIndex);
void AddSubObjectRanges(slang::TypeLayoutReflection *layout);
};
struct DescriptorLayoutBuilder
{
PipelineLayoutBuilder *m_PipelineLayoutBuilder;
eastl::vector<vk::DescriptorSetLayoutBinding> m_LayoutBindings;
u32 m_SetIndex;
vk::ShaderStageFlags &Stage() const;
explicit DescriptorLayoutBuilder(PipelineLayoutBuilder *pipelineLayoutBuilder);
void AddGlobalScopeParameters(slang::ProgramLayout *layout);
void AddEntryPointParameters(slang::ProgramLayout *layout);
void AddEntryPointParameters(slang::EntryPointLayout *layout);
void AddAutomaticallyIntroducedUniformBuffer();
void AddRanges(slang::TypeLayoutReflection *layout);
void AddRangesForParamBlockElement(slang::TypeLayoutReflection *layout);
void AddDescriptorRange(slang::TypeLayoutReflection *layout, i64 relativeSetIndex, i64 rangeIndex);
void AddDescriptorRanges(slang::TypeLayoutReflection *layout);
void Build();
};
} // namespace _internal
} // namespace systems


@@ -1,157 +0,0 @@
// =============================================
// Aster: render_resource_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "buffer_manager.h"
#include "image_manager.h"
#include "EASTL/deque.h"
#include "EASTL/vector.h"
namespace systems
{
class RenderResourceManager
{
private:
union WriteInfo {
vk::DescriptorBufferInfo uBufferInfo;
vk::DescriptorImageInfo uImageInfo;
vk::BufferView uBufferView;
explicit WriteInfo(const vk::DescriptorBufferInfo &info);
explicit WriteInfo(const vk::DescriptorImageInfo &info);
explicit WriteInfo(const vk::BufferView &info);
};
using WriteCommand = vk::WriteDescriptorSet;
union WriteOwner {
Handle<Buffer> uBufferHandle;
Handle<Image> uImageHandle;
explicit WriteOwner(const Handle<Buffer> &handle);
explicit WriteOwner(const Handle<Image> &handle);
WriteOwner(const WriteOwner &other)
{
// Switch on *other*: our own uRawHandle is not constructed yet.
switch (other.uRawHandle.GetType())
{
case BUFFER_BINDING_INDEX:
new (&uBufferHandle) Handle<Buffer>{other.uBufferHandle};
break;
case IMAGE_BINDING_INDEX:
new (&uImageHandle) Handle<Image>{other.uImageHandle};
break;
default:
ERROR("Invalid Handle type.") THEN_ABORT(-1);
}
}
WriteOwner(WriteOwner &&other) noexcept
{
switch (other.uRawHandle.GetType())
{
case BUFFER_BINDING_INDEX:
new (&uBufferHandle) Handle<Buffer>{std::move(other.uBufferHandle)};
break;
case IMAGE_BINDING_INDEX:
new (&uImageHandle) Handle<Image>{std::move(other.uImageHandle)};
break;
default:
ERROR("Invalid Handle type.") THEN_ABORT(-1);
}
}
WriteOwner &
operator=(const WriteOwner &other)
{
if (this == &other)
return *this;
// The active member (and its type) may change, so destroy and re-construct.
this->~WriteOwner();
new (this) WriteOwner{other};
return *this;
}
WriteOwner &
operator=(WriteOwner &&other) noexcept
{
if (this == &other)
return *this;
this->~WriteOwner();
new (this) WriteOwner{std::move(other)};
return *this;
}
~WriteOwner()
{
switch (uRawHandle.GetType())
{
case BUFFER_BINDING_INDEX:
uBufferHandle.~Handle();
return;
case IMAGE_BINDING_INDEX:
uImageHandle.~Handle();
return;
default:
ERROR("Invalid Handle type.") THEN_ABORT(-1);
}
}
private:
RawHandle uRawHandle;
};
public:
RenderResourceManager(const Device *device, u32 maxBuffers, u32 maxImages);
void Commit(concepts::HandleType auto &handle);
private:
BufferManager m_BufferManager;
ImageManager m_ImageManager;
vk::DescriptorPool m_DescriptorPool;
vk::DescriptorSetLayout m_SetLayout;
vk::DescriptorSet m_DescriptorSet;
constexpr static u8 BUFFER_BINDING_INDEX = 0;
constexpr static u8 IMAGE_BINDING_INDEX = 1;
eastl::vector<vk::WriteDescriptorSet> m_Writes;
eastl::deque<WriteInfo> m_WriteInfos;
eastl::vector<WriteOwner> m_WriteOwner;
#if !defined(ASTER_NDEBUG)
usize m_CommitedBufferCount = 0;
usize m_CommitedTextureCount = 0;
usize m_CommitedStorageTextureCount = 0;
#endif
};
} // namespace systems


@@ -0,0 +1,646 @@
// =============================================
// Aster: rendering_device.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "context.h"
#include "pipeline_helpers.h"
#include "resource.h"
#include "aster/aster.h"
#include "aster/core/buffer.h"
#include "aster/core/device.h"
#include "aster/core/image.h"
#include "aster/core/image_view.h"
#include "aster/core/instance.h"
#include "aster/core/physical_device.h"
#include "aster/core/pipeline.h"
#include "aster/core/sampler.h"
#include "aster/core/size.h"
#include "aster/core/swapchain.h"
#include <EASTL/hash_map.h>
#include <EASTL/optional.h>
#include <EASTL/variant.h>
#include <slang-com-ptr.h>
#include <slang.h>
constexpr static u32 MAX_FRAMES_IN_FLIGHT = 3;
struct Window;
template <>
struct eastl::hash<vk::SamplerCreateInfo>
{
usize
operator()(vk::SamplerCreateInfo const &createInfo) const noexcept
{
usize hash = HashAny(createInfo.flags);
hash = HashCombine(hash, HashAny(createInfo.magFilter));
hash = HashCombine(hash, HashAny(createInfo.minFilter));
hash = HashCombine(hash, HashAny(createInfo.mipmapMode));
hash = HashCombine(hash, HashAny(createInfo.addressModeU));
hash = HashCombine(hash, HashAny(createInfo.addressModeV));
hash = HashCombine(hash, HashAny(createInfo.addressModeW));
hash = HashCombine(hash, HashAny(static_cast<usize>(createInfo.mipLodBias * 1000))); // Resolution of 10^-3
hash = HashCombine(hash, HashAny(createInfo.anisotropyEnable));
hash = HashCombine(
hash,
HashAny(static_cast<usize>(createInfo.maxAnisotropy * 0x20))); // 32:1 Anisotropy is enough resolution
hash = HashCombine(hash, HashAny(createInfo.compareEnable));
hash = HashCombine(hash, HashAny(createInfo.compareOp));
hash = HashCombine(hash, HashAny(static_cast<usize>(createInfo.minLod * 1000))); // 0.001 resolution is enough.
hash = HashCombine(
hash,
HashAny(static_cast<usize>(createInfo.maxLod * 1000))); // 0.001 resolution is enough. (1 == NO Clamp)
hash = HashCombine(hash, HashAny(createInfo.borderColor));
hash = HashCombine(hash, HashAny(createInfo.unnormalizedCoordinates));
return hash;
}
};
namespace systems
{
// ====================================================================================================
#pragma region Creation Structs
// ====================================================================================================
// ----------------------------------------------------------------------------------------------------
#pragma region Image
// ----------------------------------------------------------------------------------------------------
struct Texture2DCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
bool m_IsSampled = true;
bool m_IsMipMapped = false;
bool m_IsStorage = false;
};
struct TextureCubeCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
u32 m_Side = 0;
cstr m_Name = nullptr;
bool m_IsSampled = true;
bool m_IsMipMapped = false;
bool m_IsStorage = false;
};
struct AttachmentCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
};
struct DepthStencilImageCreateInfo
{
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
};
#pragma endregion
// ----------------------------------------------------------------------------------------------------
#pragma region View
// ----------------------------------------------------------------------------------------------------
template <concepts::AnyImage TImage>
struct ViewCreateInfo
{
using ImageType = TImage;
Ref<ImageType> m_Image;
cstr m_Name;
vk::ImageViewType m_ViewType = vk::ImageViewType::e2D;
vk::ComponentMapping m_Components = {};
vk::ImageAspectFlags m_AspectMask = {};
eastl::optional<u8> m_MipLevelCount = eastl::nullopt;
eastl::optional<u8> m_LayerCount = eastl::nullopt;
u8 m_BaseMipLevel = 0;
u8 m_BaseLayer = 0;
[[nodiscard]] u8
GetMipLevelCount() const
{
return m_MipLevelCount.value_or(m_Image->m_MipLevels - m_BaseMipLevel);
}
[[nodiscard]] u8
GetLayerCount() const
{
return m_LayerCount.value_or(m_Image->m_LayerCount - m_BaseLayer);
}
explicit
operator vk::ImageViewCreateInfo() const
{
return {
.image = m_Image->m_Image,
.viewType = m_ViewType,
.format = m_Image->m_Format,
.components = m_Components,
.subresourceRange =
{
.aspectMask = m_AspectMask,
.baseMipLevel = m_BaseMipLevel,
.levelCount = GetMipLevelCount(),
.baseArrayLayer = m_BaseLayer,
.layerCount = GetLayerCount(),
},
};
}
explicit
operator ViewCreateInfo<Image>() const
{
return {
.m_Image = CastImage<Image>(m_Image),
.m_Name = m_Name,
.m_ViewType = m_ViewType,
.m_Components = m_Components,
.m_AspectMask = m_AspectMask,
.m_MipLevelCount = m_MipLevelCount,
.m_LayerCount = m_LayerCount,
.m_BaseMipLevel = m_BaseMipLevel,
.m_BaseLayer = m_BaseLayer,
};
}
};
#pragma endregion
// ----------------------------------------------------------------------------------------------------
#pragma region Sampler
// ----------------------------------------------------------------------------------------------------
struct SamplerCreateInfo
{
cstr m_Name = nullptr;
vk::SamplerCreateFlags m_Flags = {};
vk::Filter m_MagFilter = vk::Filter::eLinear;
vk::Filter m_MinFilter = vk::Filter::eLinear;
vk::SamplerMipmapMode m_MipmapMode = vk::SamplerMipmapMode::eLinear;
vk::SamplerAddressMode m_AddressModeU = vk::SamplerAddressMode::eRepeat;
vk::SamplerAddressMode m_AddressModeV = vk::SamplerAddressMode::eRepeat;
vk::SamplerAddressMode m_AddressModeW = vk::SamplerAddressMode::eRepeat;
vk::BorderColor m_BorderColor = vk::BorderColor::eFloatOpaqueBlack;
vk::CompareOp m_CompareOp = vk::CompareOp::eNever;
f32 m_MipLodBias = 0.0f;
f32 m_MaxAnisotropy = 16.0f;
f32 m_MinLod = 0;
f32 m_MaxLod = VK_LOD_CLAMP_NONE;
bool m_AnisotropyEnable = true;
bool m_CompareEnable = false;
bool m_NormalizedCoordinates = true;
explicit
operator vk::SamplerCreateInfo() const
{
return {
.flags = m_Flags,
.magFilter = m_MagFilter,
.minFilter = m_MinFilter,
.mipmapMode = m_MipmapMode,
.addressModeU = m_AddressModeU,
.addressModeV = m_AddressModeV,
.addressModeW = m_AddressModeW,
.mipLodBias = m_MipLodBias,
.anisotropyEnable = m_AnisotropyEnable,
.maxAnisotropy = m_MaxAnisotropy,
.compareEnable = m_CompareEnable,
.compareOp = m_CompareOp,
.minLod = m_MinLod,
.maxLod = m_MaxLod,
.borderColor = m_BorderColor,
.unnormalizedCoordinates = !m_NormalizedCoordinates,
};
}
};
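// The defaults above describe a trilinear, repeat-addressed, 16x anisotropic
// sampler, so most call sites only override a field or two. An illustrative
// request for a clamped variant (values are examples only):
//
//   Ref<Sampler> sampler = device.CreateSampler({
//       .m_Name = "LinearClamp",
//       .m_AddressModeU = vk::SamplerAddressMode::eClampToEdge,
//       .m_AddressModeV = vk::SamplerAddressMode::eClampToEdge,
//       .m_AddressModeW = vk::SamplerAddressMode::eClampToEdge,
//   });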
#pragma endregion
// ----------------------------------------------------------------------------------------------------
#pragma region Pipeline
// ----------------------------------------------------------------------------------------------------
struct AttributeInfo
{
u32 m_Location;
u32 m_Offset;
enum class Format
{
eFloat32X4,
eFloat32X3,
eFloat32X2,
eFloat32,
} m_Format;
[[nodiscard]] vk::Format
GetFormat() const
{
switch (m_Format)
{
case Format::eFloat32X4:
return vk::Format::eR32G32B32A32Sfloat;
case Format::eFloat32X3:
return vk::Format::eR32G32B32Sfloat;
case Format::eFloat32X2:
return vk::Format::eR32G32Sfloat;
case Format::eFloat32:
return vk::Format::eR32Sfloat;
}
return vk::Format::eUndefined;
}
};
struct VertexInput
{
eastl::vector<AttributeInfo> m_Attribute;
u32 m_Stride;
bool m_IsPerInstance;
};
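// Example: an interleaved position/normal/uv vertex (32 bytes) maps onto one
// per-vertex VertexInput like so; locations and offsets are illustrative.
//
//   VertexInput const vertexLayout{
//       .m_Attribute = {{.m_Location = 0, .m_Offset = 0, .m_Format = AttributeInfo::Format::eFloat32X3},
//                       {.m_Location = 1, .m_Offset = 12, .m_Format = AttributeInfo::Format::eFloat32X3},
//                       {.m_Location = 2, .m_Offset = 24, .m_Format = AttributeInfo::Format::eFloat32X2}},
//       .m_Stride = 32,
//       .m_IsPerInstance = false,
//   };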
enum class ShaderType
{
eInvalid = 0,
eVertex = VK_SHADER_STAGE_VERTEX_BIT,
eTesselationControl = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
eTesselationEvaluation = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
eGeometry = VK_SHADER_STAGE_GEOMETRY_BIT,
eFragment = VK_SHADER_STAGE_FRAGMENT_BIT,
eCompute = VK_SHADER_STAGE_COMPUTE_BIT,
eTask = VK_SHADER_STAGE_TASK_BIT_EXT,
eMesh = VK_SHADER_STAGE_MESH_BIT_EXT,
eMax,
};
constexpr static u32 ShaderTypeCount = 8;
static_assert(static_cast<u32>(ShaderType::eMax) == 1 + (1 << (ShaderTypeCount - 1)));
struct ShaderInfo
{
std::string_view m_ShaderFile;
eastl::vector<std::string_view> m_EntryPoints;
};
struct GraphicsPipelineCreateInfo
{
enum class DepthTest
{
eEnabled,
eReadOnly,
eDisabled,
};
enum class CompareOp
{
eNever = 0x0,
eLessThan = 0x1,
eEqualTo = 0x2,
eGreaterThan = 0x4,
eLessThanOrEqualTo = eLessThan | eEqualTo,
eGreaterThanOrEqualTo = eGreaterThan | eEqualTo,
eNotEqualTo = eLessThan | eGreaterThan,
eAlways = eLessThan | eEqualTo | eGreaterThan,
};
eastl::fixed_vector<VertexInput, 4, false> m_VertexInputs;
eastl::fixed_vector<ShaderInfo, 4, false> m_Shaders;
DepthTest m_DepthTest = DepthTest::eEnabled;
CompareOp m_DepthOp = CompareOp::eLessThan;
cstr m_Name;
private:
friend RenderingDevice;
[[nodiscard]] vk::PipelineDepthStencilStateCreateInfo GetDepthStencilStateCreateInfo() const;
};
struct ComputePipelineCreateInfo
{
ShaderInfo m_Shader;
cstr m_Name;
};
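// Putting the pieces together; the shader path and entry points are
// placeholders, and `vertexLayout` is the example VertexInput from above.
//
//   Pipeline pipeline;
//   GraphicsPipelineCreateInfo const info{
//       .m_VertexInputs = {vertexLayout},
//       .m_Shaders = {{.m_ShaderFile = "shaders/mesh.slang", .m_EntryPoints = {"vsMain", "fsMain"}}},
//       .m_Name = "MeshPipeline",
//   };
//   if (auto err = device.CreateGraphicsPipeline(pipeline, info))
//       ERROR("{}", err.What());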
#pragma endregion
// ----------------------------------------------------------------------------------------------------
#pragma region Device
// ----------------------------------------------------------------------------------------------------
PhysicalDevice DefaultPhysicalDeviceSelector(PhysicalDevices const &physicalDevices);
using PhysicalDeviceSelectorFn = PhysicalDevice (*)(PhysicalDevices const &);
static_assert(std::convertible_to<decltype(DefaultPhysicalDeviceSelector), PhysicalDeviceSelectorFn>);
struct DeviceCreateInfo
{
std::reference_wrapper<Window> m_Window;
Features m_Features;
cstr m_AppName = "Aster App";
Version m_AppVersion = {0, 1, 0};
PhysicalDeviceSelectorFn m_PhysicalDeviceSelector = DefaultPhysicalDeviceSelector;
std::span<u8> m_PipelineCacheData = {};
eastl::vector<cstr> m_ShaderSearchPaths;
bool m_UseBindless = true;
cstr m_Name = "Primary";
};
#pragma endregion
#pragma endregion
namespace _internal
{
class SyncServer;
}
class Receipt
{
void *m_Opaque;
explicit Receipt(void *opaque)
: m_Opaque{opaque}
{
}
friend _internal::SyncServer;
};
struct Frame
{
// Persistent
RenderingDevice *m_Device;
// TODO: ThreadSafe
_internal::GraphicsContextPool m_PrimaryPool;
_internal::TransferContextPool m_AsyncTransferPool;
_internal::ComputeContextPool m_AsyncComputePool;
vk::Fence m_FrameAvailableFence;
vk::Semaphore m_ImageAcquireSem;
vk::Semaphore m_RenderFinishSem;
u32 m_FrameIdx;
// Transient
vk::Image m_SwapchainImage;
vk::ImageView m_SwapchainImageView;
Size2D m_SwapchainSize;
u32 m_ImageIdx;
void Reset(u32 imageIdx, vk::Image swapchainImage, vk::ImageView swapchainImageView, Size2D swapchainSize);
GraphicsContext CreateGraphicsContext();
TransferContext CreateAsyncTransferContext();
ComputeContext CreateAsyncComputeContext();
void WaitUntilReady();
Frame() = default;
Frame(RenderingDevice &device, u32 frameIndex, u32 primaryQueueFamily, u32 asyncTransferQueue,
u32 asyncComputeQueue);
Frame(Frame &&other) noexcept;
Frame &operator=(Frame &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(Frame);
~Frame() = default;
};
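// The per-frame cycle this struct implies, as a sketch (details such as where
// recording happens are up to the caller):
//
//   Frame &frame = device.GetNextFrame();
//   frame.WaitUntilReady(); // presumably blocks on m_FrameAvailableFence
//   GraphicsContext ctx = frame.CreateGraphicsContext();
//   // ... record against frame.m_SwapchainImageView ...
//   device.Present(frame, ctx);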
class CommitManager;
class RenderingDevice final
{
public: // TODO: Temp
std::reference_wrapper<Window> m_Window;
Instance m_Instance;
Surface m_Surface;
Device m_Device;
Swapchain m_Swapchain;
std::unique_ptr<CommitManager> m_CommitManager;
// TODO: This is single-threaded.
vk::Queue m_PrimaryQueue;
u32 m_PrimaryQueueFamily;
vk::Queue m_TransferQueue;
u32 m_TransferQueueFamily;
vk::Queue m_ComputeQueue;
u32 m_ComputeQueueFamily;
_internal::OrderlessTransferContextPool m_TransferContextPool;
_internal::OrderlessComputeContextPool m_ComputeContextPool;
std::array<Frame, MAX_FRAMES_IN_FLIGHT> m_Frames;
u32 m_CurrentFrameIdx = 0;
public:
// ====================================================================================================
// Resource Management
// ====================================================================================================
//
// Buffer Management
// ----------------------------------------------------------------------------------------------------
[[nodiscard]] Ref<StorageBuffer> CreateStorageBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Ref<IndexBuffer> CreateIndexBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Ref<UniformBuffer> CreateUniformBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Ref<StagingBuffer> CreateStagingBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Ref<VertexBuffer> CreateVertexBuffer(usize size, cstr name = nullptr);
//
// Image Management
// ----------------------------------------------------------------------------------------------------
template <concepts::ImageInto<Texture> T>
[[nodiscard]] Ref<T>
CreateTexture2D(Texture2DCreateInfo const &createInfo)
{
return CastImage<T>(CreateTexture2D(createInfo));
}
template <concepts::ImageInto<TextureCube> T>
[[nodiscard]] Ref<T>
CreateTextureCube(TextureCubeCreateInfo const &createInfo)
{
return CastImage<T>(CreateTextureCube(createInfo));
}
[[nodiscard]] Ref<Image> CreateTexture2D(Texture2DCreateInfo const &createInfo);
[[nodiscard]] Ref<ImageCube> CreateTextureCube(TextureCubeCreateInfo const &createInfo);
[[nodiscard]] Ref<Image> CreateAttachment(AttachmentCreateInfo const &createInfo);
[[nodiscard]] Ref<Image> CreateDepthStencilImage(DepthStencilImageCreateInfo const &createInfo);
//
// View Management
// ----------------------------------------------------------------------------------------------------
template <concepts::View TImageView>
Ref<TImageView>
CreateView(ViewCreateInfo<typename TImageView::ImageType> const &createInfo)
{
return CastView<TImageView>(CreateView(ViewCreateInfo<Image>(createInfo)));
}
[[nodiscard]] Ref<ImageView> CreateView(ViewCreateInfo<Image> const &createInfo);
//
// Image - View Combined Management
// ----------------------------------------------------------------------------------------------------
template <concepts::ViewTo<Image> T>
[[nodiscard]] Ref<T>
CreateTexture2DWithView(Texture2DCreateInfo const &createInfo)
{
auto handle = CreateTexture2DWithView(createInfo);
return CastView<T>(handle);
}
template <concepts::ViewTo<ImageCube> T>
[[nodiscard]] Ref<T>
CreateTextureCubeWithView(TextureCubeCreateInfo const &createInfo)
{
auto handle = CreateTextureCubeWithView(createInfo);
return CastView<T>(handle);
}
[[nodiscard]] Ref<TextureView> CreateTexture2DWithView(Texture2DCreateInfo const &createInfo);
[[nodiscard]] Ref<ImageCubeView> CreateTextureCubeWithView(TextureCubeCreateInfo const &createInfo);
[[nodiscard]] Ref<ImageView> CreateAttachmentWithView(AttachmentCreateInfo const &createInfo);
[[nodiscard]] Ref<ImageView> CreateDepthStencilImageWithView(DepthStencilImageCreateInfo const &createInfo);
//
// Sampler Management
// ----------------------------------------------------------------------------------------------------
private:
eastl::hash_map<vk::SamplerCreateInfo, WeakRef<Sampler>> m_HashToSamplerIdx;
public:
Ref<Sampler> CreateSampler(SamplerCreateInfo const &createInfo);
//
// Pipeline
// ----------------------------------------------------------------------------------------------------
// TODO: Cache shader modules for reuse. Time to move to `slang`
private:
Slang::ComPtr<slang::IGlobalSession> m_GlobalSlangSession;
Slang::ComPtr<slang::ISession> m_SlangSession;
PipelineCreationError
CreateShaders(eastl::fixed_vector<vk::PipelineShaderStageCreateInfo, ShaderTypeCount, false> &shadersOut,
Slang::ComPtr<slang::IComponentType> &program, std::span<ShaderInfo const> const &shaders);
PipelineCreationError
CreatePipelineLayout(vk::PipelineLayout &pipelineLayout, Slang::ComPtr<slang::IComponentType> const &program);
public:
// Pipelines, unlike the other resources, are not ref-counted.
PipelineCreationError CreateGraphicsPipeline(Pipeline &pipeline, GraphicsPipelineCreateInfo const &createInfo);
PipelineCreationError CreateComputePipeline(Pipeline &pipeline, ComputePipelineCreateInfo const &createInfo);
//
// Frames
// ----------------------------------------------------------------------------------------------------
public:
Frame &GetNextFrame();
Size2D
GetSwapchainSize() const
{
return {m_Swapchain.m_Extent.width, m_Swapchain.m_Extent.height};
}
void
RegisterResizeCallback(Swapchain::FnResizeCallback &&callback)
{
m_Swapchain.RegisterResizeCallback(eastl::forward<Swapchain::FnResizeCallback>(callback));
}
void Present(Frame &frame, GraphicsContext &graphicsContext);
//
// Context
// ----------------------------------------------------------------------------------------------------
friend Context;
friend GraphicsContext;
friend TransferContext;
TransferContext CreateTransferContext();
ComputeContext CreateComputeContext();
Receipt Submit(Context &context);
//
// Sync
// ----------------------------------------------------------------------------------------------------
std::unique_ptr<_internal::SyncServer> m_SyncServer;
void WaitOn(Receipt receipt);
//
// RenderingDevice Methods
// ----------------------------------------------------------------------------------------------------
template <concepts::VkHandle T>
void
SetName(T const &object, cstr name) const
{
m_Device.SetName(object, name);
}
[[nodiscard]] vk::Queue
GetQueue(u32 const familyIndex, u32 const queueIndex) const
{
return m_Device.GetQueue(familyIndex, queueIndex);
}
[[nodiscard]] eastl::vector<u8>
DumpPipelineCache() const
{
return m_Device.DumpPipelineCache();
}
void
WaitIdle() const
{
m_Device.WaitIdle();
}
// Inner
// ----------------------------------------------------------------------------------------------------
[[nodiscard]] Device &
GetInner()
{
return m_Device;
}
[[nodiscard]] vk::Device &
GetHandle()
{
return m_Device.m_Device;
}
// Ctor/Dtor
// ----------------------------------------------------------------------------------------------------
explicit RenderingDevice(DeviceCreateInfo const &createInfo);
~RenderingDevice();
PIN_MEMORY(RenderingDevice);
};
} // namespace systems


@@ -0,0 +1,145 @@
// =============================================
// Aster: resource.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "aster/core/buffer.h"
#include "aster/core/image.h"
#include "aster/core/image_view.h"
#include <EASTL/intrusive_ptr.h>
namespace systems
{
// ====================================================================================================
#pragma region Util Methods
// ====================================================================================================
#pragma region Buffer
// ----------------------------------------------------------------------------------------------------
template <std::derived_from<Buffer> TTo, std::derived_from<Buffer> TFrom>
static Ref<TTo>
CastBuffer(Ref<TFrom> const &from)
{
if constexpr (not concepts::BufferInto<TFrom, TTo>)
assert(TTo::FLAGS & from->m_Flags);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion
#pragma region Image
// ----------------------------------------------------------------------------------------------------
template <std::derived_from<Image> TTo, std::derived_from<Image> TFrom>
static Ref<TTo>
CastImage(Ref<TFrom> const &from)
{
if constexpr (not concepts::ImageInto<TFrom, TTo>)
assert(TTo::FLAGS & from->m_Flags_);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion
#pragma region View
// ----------------------------------------------------------------------------------------------------
template <concepts::View TTo, std::derived_from<Image> TFrom>
static Ref<TTo>
CastView(Ref<View<TFrom>> const &from)
{
if constexpr (not concepts::ImageInto<TFrom, typename TTo::ImageType>)
assert(TTo::ImageType::FLAGS & from->m_Image->m_Flags_);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion
#pragma endregion
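// These casts are zero-cost reinterpretations of the Ref: widening casts are
// proven safe at compile time by the *Into concepts, narrowing casts are only
// sanity-checked against FLAGS in debug builds. Illustrative round trip:
//
//   Ref<StorageBuffer> storage = device.CreateStorageBuffer(256, "Params");
//   Ref<Buffer> any = CastBuffer<Buffer>(storage);            // concept-checked
//   Ref<StorageBuffer> back = CastBuffer<StorageBuffer>(any); // assert-checked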
/**
* ResId manages the lifetime of the committed resource.
* @tparam T Type of the committed resource.
*/
template <typename T>
class ResId
{
using IdType = u32;
public:
constexpr static IdType INVALID = MaxValue<IdType>;
private:
IdType m_Index;
u32 m_Padding = 0; ///< Slang DescriptorHandles are a pair of u32. TODO: Use as validation.
explicit ResId(IdType const index)
: m_Index{index}
{
AddRef();
}
friend class CommitManager;
public:
static ResId
Null()
{
return ResId{INVALID};
}
ResId(ResId const &other)
: m_Index{other.m_Index}
{
AddRef();
}
ResId(ResId &&other) noexcept
: m_Index{other.m_Index}
{
AddRef();
}
ResId &
operator=(ResId const &other)
{
if (this == &other)
return *this;
Release(); // Drop the old reference before taking over the new one.
m_Index = other.m_Index;
AddRef();
return *this;
}
ResId &
operator=(ResId &&other) noexcept
{
if (this == &other)
return *this;
Release(); // Same: the previous index must be released or it leaks.
m_Index = other.m_Index;
AddRef();
return *this;
}
~ResId()
{
Release();
}
private:
void AddRef() const; ///< Increases the refcount in the CommitManager.
void Release() const; ///< Decreases the refcount in the CommitManager.
};
struct NullId
{
template <typename T>
operator ResId<T>()
{
return ResId<T>::Null();
}
};
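// NullId lets a call site hand over a typeless "nothing", e.g. for a bindless
// slot that is intentionally unbound:
//
//   ResId<ImageView> shadowMap = NullId{}; // converts to ResId<ImageView>::Null()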
} // namespace systems


@@ -0,0 +1,79 @@
// =============================================
// Aster: sync_server.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/aster.h"
#include "context.h"
#include <EASTL/deque.h>
#include <EASTL/intrusive_list.h>
namespace systems
{
class Receipt;
class RenderingDevice;
} // namespace systems
namespace systems::_internal
{
struct TimelinePoint
{
u64 m_WaitValue;
u64 m_NextValue;
};
class SyncServer
{
struct Entry : eastl::intrusive_list_node
{
vk::Semaphore m_Semaphore;
TimelinePoint m_CurrentPoint;
ContextPool *m_AttachedPool;
explicit Entry(RenderingDevice &device);
void Destroy(RenderingDevice &device);
void Wait(RenderingDevice &device);
void Next();
void AttachPool(ContextPool *pool);
Entry(Entry &&) = default;
Entry &operator=(Entry &&) = default;
~Entry() = default;
DISALLOW_COPY_AND_ASSIGN(Entry);
};
RenderingDevice *m_Device;
eastl::deque<Entry> m_Allocations;
eastl::intrusive_list<Entry> m_FreeList;
public:
Receipt Allocate();
void Free(Receipt);
void WaitOn(Receipt);
private:
static Entry &GetEntry(Receipt receipt);
// Inner Alloc/Free functions.
Entry &AllocateEntry();
void FreeEntry(Entry &entry);
// Constructor/Destructor
explicit SyncServer(RenderingDevice &device);
public:
~SyncServer();
// Move Constructors.
SyncServer(SyncServer &&other) noexcept;
SyncServer &operator=(SyncServer &&other) noexcept;
friend RenderingDevice;
DISALLOW_COPY_AND_ASSIGN(SyncServer);
};
} // namespace systems::_internal


@@ -3,4 +3,7 @@
cmake_minimum_required(VERSION 3.13)
target_sources(aster_core
INTERFACE "logger.h")
INTERFACE
"logger.h"
"freelist.h"
"files.h")


@@ -0,0 +1,15 @@
// =============================================
// Aster: files.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/core/constants.h"
#include <EASTL/span.h>
#include <EASTL/vector.h>
#include <string_view>
eastl::vector<u32> ReadFile(std::string_view fileName);
eastl::vector<u8> ReadFileBytes(std::string_view fileName, bool errorOnFail = true);
bool WriteFileBytes(std::string_view fileName, eastl::span<u8> data);
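// Illustrative use (the paths are placeholders): ReadFile yields u32 words,
// which suits SPIR-V, while ReadFileBytes/WriteFileBytes move raw bytes.
//
//   eastl::vector<u8> blob = ReadFileBytes("cache/pipeline.bin", /*errorOnFail=*/false);
//   if (!blob.empty())
//       WriteFileBytes("cache/pipeline.bin.bak", blob);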


@@ -0,0 +1,96 @@
// =============================================
// Aster: freelist.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include <optional>
struct FreeListNode
{
FreeListNode *m_Next;
};
template <typename T>
concept FreeListCapable = sizeof(T) >= sizeof(FreeListNode);
template <FreeListCapable T>
struct FreeList
{
using Value = T;
using Reference = T &;
using ConstReference = T const &;
using Pointer = T *;
FreeListNode *m_Top;
FreeList()
: m_Top{nullptr}
{
}
FreeList(FreeList &&other) noexcept
: m_Top{Take(other.m_Top)}
{
}
FreeList &
operator=(FreeList &&other) noexcept
{
if (this == &other)
return *this;
m_Top = Take(other.m_Top);
return *this;
}
DISALLOW_COPY_AND_ASSIGN(FreeList);
~FreeList()
{
m_Top = nullptr;
}
[[nodiscard]] bool
Empty() const
{
return !m_Top;
}
[[nodiscard]] Reference
Pop()
{
assert(m_Top);
Reference ref = *reinterpret_cast<Pointer>(m_Top);
m_Top = m_Top->m_Next;
return ref;
}
void
Push(Reference ref)
{
auto next = reinterpret_cast<FreeListNode *>(&ref);
next->m_Next = m_Top;
m_Top = next;
}
[[nodiscard]] ConstReference
Peek() const
{
assert(m_Top);
// m_Top points at recycled T storage; *m_Top would be a FreeListNode, not a T.
return *reinterpret_cast<Pointer>(m_Top);
}
[[nodiscard]] Reference
Peek()
{
assert(m_Top);
return *reinterpret_cast<Pointer>(m_Top);
}
void
Clear()
{
m_Top = nullptr;
}
};
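// Usage sketch: the list threads a FreeListNode through recycled objects, so a
// T only needs to be at least pointer-sized (enforced by FreeListCapable).
// `Slot` and `slots` are hypothetical.
//
//   struct Slot { u64 m_Payload; };
//   Slot slots[16];
//   FreeList<Slot> freeSlots;
//   freeSlots.Push(slots[3]);           // slot 3 becomes reusable
//   if (!freeSlots.Empty())
//       Slot &reused = freeSlots.Pop(); // hands slot 3's storage back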


@@ -20,16 +20,16 @@ struct Logger
eVerbose,
};
u32 m_MinimumLoggingLevel{Cast<u32>(LogType::eDebug)};
u32 m_MinimumLoggingLevel{static_cast<u32>(LogType::eDebug)};
void
SetMinimumLoggingLevel(LogType logType)
{
m_MinimumLoggingLevel = Cast<u32>(logType);
m_MinimumLoggingLevel = static_cast<u32>(logType);
}
template <LogType TLogLevel>
constexpr static const char *
constexpr static char const *
ToCstr()
{
if constexpr (TLogLevel == LogType::eError)
@@ -45,7 +45,7 @@ struct Logger
}
template <LogType TLogLevel>
constexpr static const char *
constexpr static char const *
ToColorCstr()
{
if constexpr (TLogLevel == LogType::eError)
@@ -62,9 +62,9 @@ struct Logger
template <LogType TLogLevel>
void
Log(const std::string_view &message, const char *loc, u32 line) const
Log(std::string_view const &message, char const *loc, u32 line) const
{
if (Cast<u32>(TLogLevel) <= m_MinimumLoggingLevel)
if (static_cast<u32>(TLogLevel) <= m_MinimumLoggingLevel)
{
fmt::println("{}{} {} {} at {}:{}{}", ToColorCstr<TLogLevel>(), ToCstr<TLogLevel>(), message.data(),
ansi_color::Black, loc, line, ansi_color::Reset);
@@ -79,9 +79,9 @@ struct Logger
template <LogType TLogLevel>
void
LogCond(const char *exprStr, const std::string_view &message, const char *loc, u32 line) const
LogCond(char const *exprStr, std::string_view const &message, char const *loc, u32 line) const
{
if (Cast<u32>(TLogLevel) <= m_MinimumLoggingLevel)
if (static_cast<u32>(TLogLevel) <= m_MinimumLoggingLevel)
{
fmt::println("{}{} ({}) {} {} at {}:{}{}", ToColorCstr<TLogLevel>(), ToCstr<TLogLevel>(), exprStr,
message.data(), ansi_color::Black, loc, line, ansi_color::Reset);
@@ -103,26 +103,26 @@ extern Logger g_Logger;
#define INFO(...) g_Logger.Log<Logger::LogType::eInfo>(fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ERROR_IF(expr, ...) \
if (Cast<bool>(expr)) [[unlikely]] \
if (static_cast<bool>(expr)) [[unlikely]] \
g_Logger.LogCond<Logger::LogType::eError>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define WARN_IF(expr, ...) \
if (Cast<bool>(expr)) [[unlikely]] \
if (static_cast<bool>(expr)) [[unlikely]] \
g_Logger.LogCond<Logger::LogType::eWarning>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define INFO_IF(expr, ...) \
if (Cast<bool>(expr)) \
if (static_cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eInfo>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_ERROR(expr, ...) \
; \
else if (Cast<bool>(expr)) \
else if (static_cast<bool>(expr)) \
[[unlikely]] g_Logger.LogCond<Logger::LogType::eError>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_WARN(expr, ...) \
; \
else if (Cast<bool>(expr)) \
else if (static_cast<bool>(expr)) \
[[unlikely]] g_Logger.LogCond<Logger::LogType::eWarning>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_INFO(expr, ...) \
; \
else if (Cast<bool>(expr)) \
else if (static_cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eInfo>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_ERROR(...) \
@@ -139,11 +139,11 @@ extern Logger g_Logger;
#define DEBUG(...) g_Logger.Log<Logger::LogType::eDebug>(fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define DEBUG_IF(expr, ...) \
if (Cast<bool>(expr)) \
if (static_cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eDebug>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_DEBUG(expr, ...) \
; \
else if (Cast<bool>(expr)) \
else if (static_cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eDebug>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_DEBUG(...) \
; \
@@ -174,11 +174,11 @@ extern Logger g_Logger;
#define VERBOSE(...) g_Logger.Log<Logger::LogType::eVerbose>(fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define VERBOSE_IF(expr, ...) \
if (Cast<bool>(expr)) \
if (static_cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eVerbose>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_VERBOSE(expr, ...) \
; \
else if (Cast<bool>(expr)) \
else if (static_cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eVerbose>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_VERBOSE(...) \
; \
@@ -207,5 +207,5 @@ extern Logger g_Logger;
#endif // !defined(VERBOSE_LOG_DISABLED)
#define DO(code) , code
#define ABORT(code) exit(Cast<i32>(code))
#define ABORT(code) exit(static_cast<i32>(code))
#define THEN_ABORT(code) , ABORT(code)


@@ -5,7 +5,7 @@ cmake_minimum_required(VERSION 3.13)
target_sources(aster_core
PRIVATE
"global.cpp"
"context.cpp"
"instance.cpp"
"physical_device.cpp"
"device.cpp"
"swapchain.cpp"
@@ -13,4 +13,5 @@ PRIVATE
"buffer.cpp"
"image.cpp"
"surface.cpp"
"window.cpp")
"window.cpp"
"sampler.cpp")


@@ -1,35 +1,25 @@
// =============================================
// Aster: buffer.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/buffer.h"
#include "core/device.h"
void
Buffer::Destroy(const Device *device)
Buffer::Buffer(Device const *device, usize const size, vk::BufferUsageFlags const bufferUsage,
VmaAllocationCreateFlags const allocationFlags, VmaMemoryUsage const memoryUsage, cstr const name)
{
if (!IsValid() || !IsOwned())
return;
assert(!m_Buffer);
vmaDestroyBuffer(device->m_Allocator, m_Buffer, m_Allocation);
m_Size_ = 0;
}
void
Buffer::Allocate(const Device *device, usize size, vk::BufferUsageFlags bufferUsage,
VmaAllocationCreateFlags allocationFlags, VmaMemoryUsage memoryUsage, cstr name)
{
assert(!IsValid());
assert(size <= SIZE_MASK);
m_Device = device;
vk::BufferCreateInfo bufferCreateInfo = {
.size = size,
.usage = bufferUsage,
.usage = bufferUsage | vk::BufferUsageFlagBits::eShaderDeviceAddress,
.sharingMode = vk::SharingMode::eExclusive,
};
const VmaAllocationCreateInfo allocationCreateInfo = {
VmaAllocationCreateInfo const allocationCreateInfo = {
.flags = allocationFlags,
.usage = memoryUsage,
};
@@ -37,156 +27,88 @@ Buffer::Allocate(const Device *device, usize size, vk::BufferUsageFlags bufferUs
VkBuffer buffer;
VmaAllocation allocation;
VmaAllocationInfo allocationInfo;
auto result = Cast<vk::Result>(vmaCreateBuffer(device->m_Allocator, Recast<VkBufferCreateInfo *>(&bufferCreateInfo),
&allocationCreateInfo, &buffer, &allocation, &allocationInfo));
auto result = static_cast<vk::Result>(
vmaCreateBuffer(device->m_Allocator, reinterpret_cast<VkBufferCreateInfo *>(&bufferCreateInfo),
&allocationCreateInfo, &buffer, &allocation, &allocationInfo));
ERROR_IF(Failed(result), "Could not allocate buffer. Cause: {}", result) THEN_ABORT(result);
vk::MemoryPropertyFlags memoryPropertyFlags;
vmaGetAllocationMemoryProperties(device->m_Allocator, allocation,
Recast<VkMemoryPropertyFlags *>(&memoryPropertyFlags));
// vk::MemoryPropertyFlags memoryPropertyFlags;
// vmaGetAllocationMemoryProperties(device->m_Allocator, allocation, Recast<VkMemoryPropertyFlags
// *>(&memoryPropertyFlags));
// TODO: Actually track Host Access
// bool hostAccessible = Cast<bool>(memoryPropertyFlags & vk::MemoryPropertyFlagBits::eHostVisible);
// bool hostAccessible = static_cast<bool>(memoryPropertyFlags & vk::MemoryPropertyFlagBits::eHostVisible);
m_Buffer = buffer;
m_Size_ = size | VALID_BUFFER_BIT | OWNED_BIT;
m_Size = size;
m_Allocation = allocation;
m_Mapped = Cast<u8 *>(allocationInfo.pMappedData);
m_Mapped = static_cast<u8 *>(allocationInfo.pMappedData);
m_Flags = {};
if (bufferUsage & vk::BufferUsageFlagBits::eTransferSrc)
m_Flags |= FlagBits::eStaging;
if (bufferUsage & vk::BufferUsageFlagBits::eIndexBuffer)
m_Flags |= FlagBits::eIndex;
if (bufferUsage & vk::BufferUsageFlagBits::eIndirectBuffer)
m_Flags |= FlagBits::eIndirect;
if (bufferUsage & vk::BufferUsageFlagBits::eVertexBuffer)
m_Flags |= FlagBits::eVertex;
if (bufferUsage & vk::BufferUsageFlagBits::eUniformBuffer)
m_Flags |= FlagBits::eUniform;
if (bufferUsage & vk::BufferUsageFlagBits::eStorageBuffer)
m_Flags |= FlagBits::eStorage;
vk::BufferDeviceAddressInfo const addressInfo = {.buffer = m_Buffer};
m_DeviceAddr = m_Device->m_Device.getBufferAddress(&addressInfo);
device->SetName(m_Buffer, name);
}
uptr
Buffer::GetDeviceAddress(const Device *device)
Buffer::Buffer(Buffer &&other) noexcept
: m_Device{Take(other.m_Device)}
, m_Buffer{Take(other.m_Buffer)}
, m_Allocation{Take(other.m_Allocation)}
, m_Mapped{Take(other.m_Mapped)}
, m_DeviceAddr{Take(other.m_DeviceAddr)}
, m_Size{Take(other.m_Size)}
{
vk::BufferDeviceAddressInfo addressInfo = {.buffer = m_Buffer};
return device->m_Device.getBufferAddress(&addressInfo);
}
Buffer &
Buffer::operator=(Buffer &&other) noexcept
{
if (this == &other)
return *this;
using std::swap;
swap(m_Device, other.m_Device);
swap(m_Buffer, other.m_Buffer);
swap(m_Allocation, other.m_Allocation);
swap(m_Mapped, other.m_Mapped);
swap(m_DeviceAddr, other.m_DeviceAddr);
swap(m_Size, other.m_Size);
return *this;
}
Buffer::~Buffer()
{
if (!m_Buffer)
return;
vmaDestroyBuffer(m_Device->m_Allocator, Take(m_Buffer), m_Allocation);
m_Size = 0;
}
uptr
Buffer::GetDeviceAddress() const
{
return m_DeviceAddr;
}
void
Buffer::Write(const Device *device, usize offset, usize size, const void *data)
Buffer::Write(usize const offset, usize const size, void const *data) const
{
assert(IsHostVisible());
if (!IsMapped())
{
void *mapped;
auto result = Cast<vk::Result>(vmaMapMemory(device->m_Allocator, m_Allocation, &mapped));
ERROR_IF(Failed(result), "Memory mapping failed. Cause: {}", result);
if (!Failed(result))
{
m_Mapped = Cast<u8 *>(mapped);
memcpy(m_Mapped + offset, data, size);
vmaUnmapMemory(device->m_Allocator, m_Allocation);
m_Mapped = nullptr;
}
}
else
{
memcpy(m_Mapped + offset, data, size);
}
assert(IsMapped());
memcpy(m_Mapped + offset, data, size);
// TODO: Debug this.
// auto result = Cast<vk::Result>(vmaCopyMemoryToAllocation(device->m_Allocator, &data, m_Allocation, 0, size));
// ERROR_IF(Failed(result), "Writing to buffer failed. Cause: {}", result) THEN_ABORT(result);
}
void
UniformBuffer::Init(const Device *device, const usize size, const cstr name)
{
Allocate(device, size, vk::BufferUsageFlagBits::eUniformBuffer,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
void
StorageBuffer::Init(const Device *device, usize size, bool hostVisible, cstr name)
{
Init(device, size, hostVisible, false, name);
}
void
StorageBuffer::Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name)
{
vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer;
if (deviceAddress)
{
usage |= vk::BufferUsageFlagBits::eShaderDeviceAddress;
}
if (hostVisible)
{
Allocate(device, size, usage,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
else
{
usage |= vk::BufferUsageFlagBits::eTransferDst;
Allocate(device, size, usage, 0,
VMA_MEMORY_USAGE_AUTO, name);
}
}
void
StorageIndexBuffer::Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name)
{
vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer;
if (deviceAddress)
{
usage |= vk::BufferUsageFlagBits::eShaderDeviceAddress;
}
if (hostVisible)
{
Allocate(device, size, usage,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
else
{
usage |= vk::BufferUsageFlagBits::eTransferDst;
Allocate(device, size, usage, 0, VMA_MEMORY_USAGE_AUTO, name);
}
}
void
IndirectBuffer::Init(const Device *device, usize size, bool hostVisible, cstr name)
{
vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndirectBuffer | vk::BufferUsageFlagBits::eShaderDeviceAddress;
if (hostVisible)
{
Allocate(device, size, usage,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
else
{
usage |= vk::BufferUsageFlagBits::eTransferDst;
Allocate(device, size, usage, 0, VMA_MEMORY_USAGE_AUTO, name);
}
}
void
VertexBuffer::Init(const Device *device, usize size, cstr name)
{
Allocate(device, size, vk::BufferUsageFlagBits::eVertexBuffer | vk::BufferUsageFlagBits::eTransferDst,
0, VMA_MEMORY_USAGE_AUTO, name);
}
void
IndexBuffer::Init(const Device *device, usize size, cstr name)
{
Allocate(device, size, vk::BufferUsageFlagBits::eIndexBuffer | vk::BufferUsageFlagBits::eTransferDst,
0, VMA_MEMORY_USAGE_AUTO, name);
}
void
StagingBuffer::Init(const Device *device, usize size, cstr name)
{
Allocate(device, size, vk::BufferUsageFlagBits::eTransferSrc,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
// auto result = static_cast<vk::Result>(vmaCopyMemoryToAllocation(device->m_Allocator, &data, m_Allocation, 0,
// size)); ERROR_IF(Failed(result), "Writing to buffer failed. Cause: {}", result) THEN_ABORT(result);
}


@@ -1,11 +1,11 @@
// =============================================
// Aster: device.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/device.h"
#include "core/context.h"
#include "core/instance.h"
#include "core/physical_device.h"
#include "core/queue_allocation.h"
@@ -17,18 +17,12 @@ constexpr eastl::array DEVICE_EXTENSIONS = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
};
Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features *enabledFeatures,
const eastl::vector<QueueAllocation> &queueAllocations, NameString &&name)
: Device(context, physicalDevice, enabledFeatures, queueAllocations, {}, std::move(name))
{
}
Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features *enabledFeatures,
const eastl::vector<QueueAllocation> &queueAllocations, eastl::span<u8> &&pipelineCacheData,
Device::Device(Instance const &context, PhysicalDevice &physicalDevice, Features &enabledFeatures,
eastl::span<QueueAllocation> const &queueAllocations, eastl::span<u8> const &pipelineCacheData,
NameString &&name)
: m_Name(std::move(name))
, m_PhysicalDevice(physicalDevice->m_PhysicalDevice)
, m_ValidationEnabled(context->m_DebugMessenger != nullptr)
, m_PhysicalDevice(physicalDevice.m_PhysicalDevice)
, m_ValidationEnabled(context.m_DebugMessenger != nullptr)
{
// Shouldn't have more than 4 deviceQueueFamilies in use anyway; else the fixed_vector spills to the heap.
eastl::fixed_vector<vk::DeviceQueueCreateInfo, 4> deviceQueueCreateInfos;
@@ -51,19 +45,19 @@ Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features
});
}
vk::PhysicalDeviceFeatures *deviceFeatures = &enabledFeatures->m_Vulkan10Features;
vk::PhysicalDeviceVulkan11Features *vulkan11Features = &enabledFeatures->m_Vulkan11Features;
vk::PhysicalDeviceVulkan12Features *vulkan12Features = &enabledFeatures->m_Vulkan12Features;
vk::PhysicalDeviceVulkan13Features *vulkan13Features = &enabledFeatures->m_Vulkan13Features;
vk::PhysicalDeviceFeatures *deviceFeatures = &enabledFeatures.m_Vulkan10Features;
vk::PhysicalDeviceVulkan11Features *vulkan11Features = &enabledFeatures.m_Vulkan11Features;
vk::PhysicalDeviceVulkan12Features *vulkan12Features = &enabledFeatures.m_Vulkan12Features;
vk::PhysicalDeviceVulkan13Features *vulkan13Features = &enabledFeatures.m_Vulkan13Features;
vulkan11Features->pNext = vulkan12Features;
vulkan12Features->pNext = vulkan13Features;
vk::DeviceCreateInfo deviceCreateInfo = {
.pNext = vulkan11Features,
.queueCreateInfoCount = Cast<u32>(deviceQueueCreateInfos.size()),
.queueCreateInfoCount = static_cast<u32>(deviceQueueCreateInfos.size()),
.pQueueCreateInfos = deviceQueueCreateInfos.data(),
.enabledExtensionCount = Cast<u32>(DEVICE_EXTENSIONS.size()),
.enabledExtensionCount = static_cast<u32>(DEVICE_EXTENSIONS.size()),
.ppEnabledExtensionNames = DEVICE_EXTENSIONS.data(),
.pEnabledFeatures = deviceFeatures,
};
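Since the 1.1/1.2/1.3 feature structs are chained through pNext above, a caller only flips fields on the Features wrapper before constructing the Device; a hedged example (the field names are core Vulkan, the wiring is an assumption):
Features features = {};
features.m_Vulkan12Features.bufferDeviceAddress = true; // required for eShaderDeviceAddress buffers
features.m_Vulkan12Features.descriptorIndexing = true;  // required for update-after-bind descriptor sets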
@ -71,25 +65,25 @@ Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features
vk::Result result = m_PhysicalDevice.createDevice(&deviceCreateInfo, nullptr, &m_Device);
ERROR_IF(Failed(result), "Could not initialize Vulkan Device. Cause: {}", result)
THEN_ABORT(result)
ELSE_DEBUG("{} ({}) Initialized.", m_Name, physicalDevice->m_DeviceProperties.deviceName.data());
ELSE_DEBUG("{} ({}) Initialized.", m_Name, physicalDevice.m_DeviceProperties.deviceName.data());
SetName(m_Device, m_Name.data());
VmaVulkanFunctions vmaVulkanFunctions = {
.vkGetInstanceProcAddr = vk::defaultDispatchLoaderDynamic.vkGetInstanceProcAddr,
.vkGetDeviceProcAddr = vk::defaultDispatchLoaderDynamic.vkGetDeviceProcAddr,
.vkGetInstanceProcAddr = vk::detail::defaultDispatchLoaderDynamic.vkGetInstanceProcAddr,
.vkGetDeviceProcAddr = vk::detail::defaultDispatchLoaderDynamic.vkGetDeviceProcAddr,
};
const VmaAllocatorCreateInfo allocatorCreateInfo = {
VmaAllocatorCreateInfo const allocatorCreateInfo = {
.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT,
.physicalDevice = m_PhysicalDevice,
.device = m_Device,
.pVulkanFunctions = &vmaVulkanFunctions,
.instance = context->m_Instance,
.instance = context.m_Instance,
.vulkanApiVersion = ASTER_API_VERSION,
};
result = Cast<vk::Result>(vmaCreateAllocator(&allocatorCreateInfo, &m_Allocator));
result = static_cast<vk::Result>(vmaCreateAllocator(&allocatorCreateInfo, &m_Allocator));
ERROR_IF(Failed(result), "Memory allocator creation failed. Cause: {}", result)
DO(m_Device.destroy(nullptr))
THEN_ABORT(result)
@ -110,6 +104,9 @@ Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features
Device::~Device()
{
if (!m_Device)
return;
m_Device.destroy(m_PipelineCache, nullptr);
if (m_Allocator)
{
@ -123,7 +120,7 @@ Device::~Device()
}
vk::Queue
Device::GetQueue(const u32 familyIndex, const u32 queueIndex) const
Device::GetQueue(u32 const familyIndex, u32 const queueIndex) const
{
vk::Queue queue;
m_Device.getQueue(familyIndex, queueIndex, &queue);
@ -156,6 +153,7 @@ Device::Device(Device &&other) noexcept
, m_PhysicalDevice(Take(other.m_PhysicalDevice))
, m_Device(Take(other.m_Device))
, m_Allocator(Take(other.m_Allocator))
, m_PipelineCache(Take(other.m_PipelineCache))
{
}
@ -168,5 +166,6 @@ Device::operator=(Device &&other) noexcept
m_PhysicalDevice = Take(other.m_PhysicalDevice);
m_Device = Take(other.m_Device);
m_Allocator = Take(other.m_Allocator);
m_PipelineCache = Take(other.m_PipelineCache);
return *this;
}

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: global.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/global.h"
@ -26,11 +26,11 @@ struct MemorySize
{
usize totalBytes = bytes + m_Bytes;
m_Bytes = totalBytes % 1024;
const usize totalKb = m_Kilobytes + totalBytes / 1024;
usize const totalKb = m_Kilobytes + totalBytes / 1024;
m_Kilobytes = totalKb % 1024;
const usize totalMb = m_Megabytes + totalKb / 1024;
usize const totalMb = m_Megabytes + totalKb / 1024;
m_Megabytes = totalMb % 1024;
m_Gigabytes += Cast<u16>(totalMb / 1024);
m_Gigabytes += static_cast<u16>(totalMb / 1024);
return *this;
}
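A worked example of the carry chain above: adding 1'048'577 bytes (1 MiB + 1 B) to a zeroed MemorySize yields m_Bytes = 1, m_Kilobytes = 0, m_Megabytes = 1, m_Gigabytes = 0; every field stays below 1024, with overflow carrying upward one level at a time.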
@ -56,23 +56,23 @@ struct fmt::formatter<MemorySize>
// return format_to(ctx.out(), "({}, {})", foo.a, foo.b); // --== KEY LINE ==--
if (mem.m_Gigabytes > 0)
{
return v10::format_to(ctx.out(), "{}.{} GB", mem.m_Gigabytes, Cast<u16>(mem.m_Megabytes / 1024.0));
return fmt::format_to(ctx.out(), "{}.{} GB", mem.m_Gigabytes, static_cast<u16>(mem.m_Megabytes / 1024.0));
}
if (mem.m_Megabytes > 0)
{
return v10::format_to(ctx.out(), "{}.{} MB", mem.m_Megabytes, Cast<u16>(mem.m_Kilobytes / 1024.0));
return fmt::format_to(ctx.out(), "{}.{} MB", mem.m_Megabytes, static_cast<u16>(mem.m_Kilobytes / 1024.0));
}
if (mem.m_Kilobytes > 0)
{
return v10::format_to(ctx.out(), "{}.{} KB", mem.m_Kilobytes, Cast<u16>(mem.m_Bytes / 1024.0));
return fmt::format_to(ctx.out(), "{}.{} KB", mem.m_Kilobytes, static_cast<u16>(mem.m_Bytes / 1024.0));
}
return v10::format_to(ctx.out(), "{} Bytes", mem.m_Bytes);
return fmt::format_to(ctx.out(), "{} Bytes", mem.m_Bytes);
}
};
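Note that the fractional digit printed above is effectively always 0: m_Megabytes is kept below 1024 by the carry logic, so m_Megabytes / 1024.0 truncated through u16 floors to zero. A sketch of the likely intent, scaling the remainder to one decimal digit (an assumed fix, not in this diff; the MB and KB branches would change the same way):
return fmt::format_to(ctx.out(), "{}.{} GB", mem.m_Gigabytes, static_cast<u16>(mem.m_Megabytes * 10 / 1024));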
void *
operator new[](size_t size, const char * /*pName*/, int flags, unsigned /*debugFlags*/, const char * /*file*/,
operator new[](size_t size, char const * /*pName*/, int flags, unsigned /*debugFlags*/, char const * /*file*/,
int /*line*/)
{
g_TotalAlloc += size;
@ -82,8 +82,8 @@ operator new[](size_t size, const char * /*pName*/, int flags, unsigned /*debugF
}
void *
operator new[](size_t size, size_t /*alignment*/, size_t /*alignmentOffset*/, const char * /*pName*/, int flags,
unsigned /*debugFlags*/, const char * /*file*/, int /*line*/)
operator new[](size_t size, size_t /*alignment*/, size_t /*alignmentOffset*/, char const * /*pName*/, int flags,
unsigned /*debugFlags*/, char const * /*file*/, int /*line*/)
{
g_TotalAlloc += size;

View File

@ -1,425 +1,482 @@
// =============================================
// Aster: image.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/image.h"
#include "core/device.h"
void
Image::Destroy(const Device *device)
Image &
Image::operator=(Image &&other) noexcept
{
if (!IsValid() || !IsOwned())
{
m_Flags_ = 0;
if (this == &other)
return *this;
using std::swap;
swap(m_Device, other.m_Device);
swap(m_Image, other.m_Image);
swap(m_Allocation, other.m_Allocation);
swap(m_Extent, other.m_Extent);
swap(m_Format, other.m_Format);
swap(m_EmptyPadding_, other.m_EmptyPadding_);
swap(m_Flags_, other.m_Flags_);
swap(m_LayerCount, other.m_LayerCount);
swap(m_MipLevels, other.m_MipLevels);
return *this;
}
Image::~Image()
{
if (!IsValid())
return;
}
device->m_Device.destroy(m_View, nullptr);
vmaDestroyImage(device->m_Allocator, m_Image, m_Allocation);
m_Flags_ = 0;
vmaDestroyImage(m_Device->m_Allocator, Take(m_Image), m_Allocation);
m_Flags_ = {};
}
void
Texture::Init(const Device *device, const vk::Extent2D extent, vk::Format imageFormat, const bool isMipMapped,
const cstr name)
Image::DestroyView(vk::ImageView const imageView) const
{
WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.height), "Image {2} is {0}x{1} (Non Power of Two)",
extent.width, extent.height, name ? name : "<unnamed>");
const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(eastl::max(extent.width, extent.height)))) : 1;
auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
if (isMipMapped)
{
usage |= vk::ImageUsageFlagBits::eTransferSrc;
}
vk::ImageCreateInfo imageCreateInfo = {
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = ToExtent3D(extent, 1),
.mipLevels = mipLevels,
.arrayLayers = 1,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = usage,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
vk::ImageView view;
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 1;
m_MipLevels = mipLevels;
device->SetName(m_Image, name);
m_Device->m_Device.destroy(imageView, nullptr);
}
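Views now live outside the Image and come back here only for destruction; a short sketch of the round trip (the creation side is assumed, e.g. via image_view.h, which this diff does not show):
vk::ImageView view = /* created elsewhere against image.m_Image */;
// ... bind and use the view ...
image.DestroyView(view); // destroyed through the Image's cached Device pointer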
/*
Cube map Faces info.
TODO: Correct this based on the actual layout for upside down viewport.
| Axis | Layer | Up |
|:----:|:-----:|:--:|
| +x | 0 | -y |
| -x | 1 | -y |
| +y | 2 | +z |
| -y | 3 | -z |
| +z | 4 | -y |
| -z | 5 | -y |
Remember, we use upside down viewport.
*/
void
TextureCube::Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isMipMapped, cstr name)
//
// void
// Texture::Init(const Device *device, const vk::Extent2D extent, vk::Format imageFormat, const bool isMipMapped,
// const cstr name)
//{
// WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.height), "Image {2} is {0}x{1} (Non Power of Two)",
// extent.width, extent.height, name ? name : "<unnamed>");
//
// const u8 mipLevels = isMipMapped ? 1 + static_cast<u8>(floor(log2(eastl::max(extent.width, extent.height)))) : 1;
//
// auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
// if (isMipMapped)
// {
// usage |= vk::ImageUsageFlagBits::eTransferSrc;
// }
//
// vk::ImageCreateInfo imageCreateInfo = {
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = ToExtent3D(extent, 1),
// .mipLevels = mipLevels,
// .arrayLayers = 1,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = usage,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = {},
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
//
// vk::ImageView view;
// vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::e2D,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = mipLevels,
// .baseArrayLayer = 0,
// .layerCount = 1,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_LayerCount = 1;
// m_MipLevels = mipLevels;
//
// device->SetName(m_Image, name);
//}
//
///*
// Cube map Faces info.
//
// TODO: Correct this based on the actual layout for upside down viewport.
//
//| Axis | Layer | Up |
//|:----:|:-----:|:--:|
//| +x | 0 | -y |
//| -x | 1 | -y |
//| +y | 2 | +z |
//| -y | 3 | -z |
//| +z | 4 | -y |
//| -z | 5 | -y |
//
// Remember, we use upside down viewport.
//
//*/
//
// void
// TextureCube::Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isMipMapped, cstr name)
//{
// WARN_IF(!IsPowerOfTwo(cubeSide), "Image Cube {1} has side {0}x{0} (Non Power of Two)", cubeSide,
// name ? name : "<unnamed>");
//
// const u8 mipLevels = isMipMapped ? 1 + static_cast<u8>(floor(log2(cubeSide))) : 1;
//
// auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
// if (isMipMapped)
// {
// usage |= vk::ImageUsageFlagBits::eTransferSrc;
// }
//
// const vk::Extent3D extent = {.width = cubeSide, .height = cubeSide, .depth = 1};
//
// vk::ImageCreateInfo imageCreateInfo = {
// .flags = vk::ImageCreateFlagBits::eCubeCompatible,
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = extent,
// .mipLevels = mipLevels,
// .arrayLayers = 6,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = usage,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = {},
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
//
// vk::ImageView view;
// vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::eCube,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = mipLevels,
// .baseArrayLayer = 0,
// .layerCount = 6,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = extent;
// m_MipLevels = mipLevels;
// m_LayerCount = 6;
//
// device->SetName(m_Image, name);
// }
//
// void
// AttachmentImage::Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, cstr name)
//{
// vk::ImageCreateInfo imageCreateInfo = {
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = ToExtent3D(extent, 1),
// .mipLevels = 1,
// .arrayLayers = 1,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate depth buffer. Cause: {}", result) THEN_ABORT(result);
//
// vk::ImageView view;
// vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::e2D,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = 1,
// .baseArrayLayer = 0,
// .layerCount = 1,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create attachment image view {}. Cause: {}", name, result)
// THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_MipLevels = 1;
// m_LayerCount = 1;
//
// device->SetName(m_Image, name);
// }
//
// void
// DepthImage::Init(const Device *device, vk::Extent2D extent, cstr name)
//{
// constexpr vk::Format imageFormat = vk::Format::eD24UnormS8Uint;
// vk::ImageCreateInfo imageCreateInfo = {
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = ToExtent3D(extent, 1),
// .mipLevels = 1,
// .arrayLayers = 1,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = vk::ImageUsageFlagBits::eDepthStencilAttachment,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate depth buffer. Cause: {}", result) THEN_ABORT(result);
//
// vk::ImageView view;
// vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::e2D,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eDepth,
// .baseMipLevel = 0,
// .levelCount = 1,
// .baseArrayLayer = 0,
// .layerCount = 1,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create depth image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_MipLevels = 1;
// m_LayerCount = 1;
//
// device->SetName(m_Image, name);
// }
//
// void
// StorageTexture::Init(const Device *device, vk::Extent2D extent, const vk::Format imageFormat, const bool isSampled,
// cstr name)
//{
// // Reasoning:
// // Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
// // results.
// auto usage =
// vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc |
// vk::ImageUsageFlagBits::eTransferDst;
// if (isSampled)
// {
// WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.height), "Image {2} is {0}x{1} (Non Power of Two)",
// extent.width, extent.height, name ? name : "<unnamed>");
// usage |= vk::ImageUsageFlagBits::eSampled;
// }
//
// vk::ImageCreateInfo imageCreateInfo = {
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = ToExtent3D(extent, 1),
// .mipLevels = 1,
// .arrayLayers = 1,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = usage,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = {},
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
//
// vk::ImageView view;
// const vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::e2D,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = 1,
// .baseArrayLayer = 0,
// .layerCount = 1,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_MipLevels = 1;
// m_LayerCount = 1;
//
// device->SetName(m_Image, name);
// }
//
// void
// StorageTextureCube::Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isSampled, bool
// isMipMapped,
// cstr name)
//{
// // Reasoning:
// // Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
// // results.
// auto usage =
// vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc |
// vk::ImageUsageFlagBits::eTransferDst;
// if (isSampled)
// {
// WARN_IF(!IsPowerOfTwo(cubeSide), "Image {1} is {0}x{0} (Non Power of Two)", cubeSide,
// name ? name : "<unnamed>");
// usage |= vk::ImageUsageFlagBits::eSampled;
// }
//
// const u8 mipLevels = isMipMapped ? 1 + static_cast<u8>(floor(log2(cubeSide))) : 1;
//
// vk::ImageCreateInfo imageCreateInfo = {
// .flags = vk::ImageCreateFlagBits::eCubeCompatible,
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = {cubeSide, cubeSide, 1},
// .mipLevels = mipLevels,
// .arrayLayers = 6,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = usage,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = {},
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
//
// vk::ImageView view;
// const vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::eCube,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = mipLevels,
// .baseArrayLayer = 0,
// .layerCount = 6,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_MipLevels = mipLevels;
// m_LayerCount = 6;
//
// device->SetName(m_Image, name);
// }
Image::Image(Image &&other) noexcept
: m_Device{Take(other.m_Device)}
, m_Image{Take(other.m_Image)}
, m_Allocation{Take(other.m_Allocation)}
, m_Extent{other.m_Extent}
, m_Format{other.m_Format}
, m_EmptyPadding_{other.m_EmptyPadding_}
, m_Flags_{other.m_Flags_}
, m_LayerCount{other.m_LayerCount}
, m_MipLevels{other.m_MipLevels}
{
WARN_IF(!IsPowerOfTwo(cubeSide), "Image Cube {1} has side {0}x{0} (Non Power of Two)", cubeSide, name ? name : "<unnamed>");
const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(cubeSide))) : 1;
auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
if (isMipMapped)
{
usage |= vk::ImageUsageFlagBits::eTransferSrc;
}
const vk::Extent3D extent = {.width = cubeSide, .height = cubeSide, .depth = 1};
vk::ImageCreateInfo imageCreateInfo = {
.flags = vk::ImageCreateFlagBits::eCubeCompatible,
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = extent,
.mipLevels = mipLevels,
.arrayLayers = 6,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = usage,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
vk::ImageView view;
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::eCube,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 6,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = extent;
m_MipLevels = mipLevels;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 6;
device->SetName(m_Image, name);
}
void
AttachmentImage::Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, cstr name)
Image::Image(Device const *device, vk::Image const image, VmaAllocation const allocation, vk::Extent3D const extent,
vk::Format const format, Flags const flags, u8 const layerCount, u8 const mipLevels)
: m_Device{device}
, m_Image{image}
, m_Allocation{allocation}
, m_Extent{extent}
, m_Format{format}
, m_Flags_{flags}
, m_LayerCount{layerCount}
, m_MipLevels{mipLevels}
{
vk::ImageCreateInfo imageCreateInfo = {
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = ToExtent3D(extent, 1),
.mipLevels = 1,
.arrayLayers = 1,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate depth buffer. Cause: {}", result) THEN_ABORT(result);
vk::ImageView view;
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create attachment image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_MipLevels = 1;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 1;
device->SetName(m_Image, name);
}
void
DepthImage::Init(const Device *device, vk::Extent2D extent, cstr name)
{
constexpr vk::Format imageFormat = vk::Format::eD24UnormS8Uint;
vk::ImageCreateInfo imageCreateInfo = {
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = ToExtent3D(extent, 1),
.mipLevels = 1,
.arrayLayers = 1,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = vk::ImageUsageFlagBits::eDepthStencilAttachment,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate depth buffer. Cause: {}", result) THEN_ABORT(result);
vk::ImageView view;
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eDepth,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create depth image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_MipLevels = 1;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 1;
device->SetName(m_Image, name);
}
void
StorageTexture::Init(const Device *device, vk::Extent2D extent, const vk::Format imageFormat, const bool isSampled,
cstr name)
{
// Reasoning:
// Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
// results.
auto usage =
vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
if (isSampled)
{
WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.height), "Image {2} is {0}x{1} (Non Power of Two)",
extent.width, extent.height, name ? name : "<unnamed>");
usage |= vk::ImageUsageFlagBits::eSampled;
}
vk::ImageCreateInfo imageCreateInfo = {
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = ToExtent3D(extent, 1),
.mipLevels = 1,
.arrayLayers = 1,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = usage,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_MipLevels = 1;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 1;
device->SetName(m_Image, name);
}
void
StorageTextureCube::Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isSampled, bool isMipMapped,
cstr name)
{
// Reasoning:
// Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
// results.
auto usage =
vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
if (isSampled)
{
WARN_IF(!IsPowerOfTwo(cubeSide), "Image {1} is {0}x{0} (Non Power of Two)", cubeSide,
name ? name : "<unnamed>");
usage |= vk::ImageUsageFlagBits::eSampled;
}
const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(cubeSide))) : 1;
vk::ImageCreateInfo imageCreateInfo = {
.flags = vk::ImageCreateFlagBits::eCubeCompatible,
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = {cubeSide, cubeSide, 1},
.mipLevels = mipLevels,
.arrayLayers = 6,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = usage,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::eCube,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 6,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_MipLevels = mipLevels;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 6;
device->SetName(m_Image, name);
}
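A short sketch of the ownership rules the reworked Image establishes (names from this diff): moves empty the source via Take/swap, so only the last holder's destructor reaches vmaDestroyImage.
Image a{device, image, allocation, extent, format, flags, /*layerCount=*/1, /*mipLevels=*/1};
Image b = std::move(a); // a.m_Image is nulled by Take; ~Image() on a is a no-op
a = std::move(b);       // the swap-based move-assign hands ownership back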

View File

@ -1,48 +1,46 @@
// =============================================
// Aster: context.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/context.h"
#include "core/instance.h"
#include "core/window.h"
#include <EASTL/array.h>
#include <EASTL/fixed_vector.h>
VKAPI_ATTR b32 VKAPI_CALL
DebugCallback(const VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
const VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT *callbackData, [[maybe_unused]] void *userData)
DebugCallback(vk::DebugUtilsMessageSeverityFlagBitsEXT const messageSeverity,
vk::DebugUtilsMessageTypeFlagsEXT const messageType,
vk::DebugUtilsMessengerCallbackDataEXT const *callbackData, [[maybe_unused]] void *userData)
{
using Severity = vk::DebugUtilsMessageSeverityFlagsEXT;
using SeverityBits = vk::DebugUtilsMessageSeverityFlagBitsEXT;
using MessageType = vk::DebugUtilsMessageTypeFlagsEXT;
using MessageTypeBits = vk::DebugUtilsMessageTypeFlagBitsEXT;
const auto severity = Severity(messageSeverity);
if (MessageType(messageType) & MessageTypeBits::eValidation)
if (messageType & MessageTypeBits::eValidation)
{
if (severity & SeverityBits::eError)
if (messageSeverity & SeverityBits::eError)
ERROR("{}", callbackData->pMessage);
if (severity & SeverityBits::eWarning)
if (messageSeverity & SeverityBits::eWarning)
WARN("{}", callbackData->pMessage);
if (severity & SeverityBits::eInfo)
if (messageSeverity & SeverityBits::eInfo)
INFO("{}", callbackData->pMessage);
if (severity & SeverityBits::eVerbose)
if (messageSeverity & SeverityBits::eVerbose)
VERBOSE("{}", callbackData->pMessage);
}
return false;
}
Context::Context(const cstr appName, const Version version, bool enableValidation)
Instance::Instance(cstr const appName, Version const version, bool enableValidation)
{
INFO_IF(enableValidation, "Validation Layers enabled");
// TODO Get/Check API Version
// Creating Instance
const vk::ApplicationInfo appInfo = {
vk::ApplicationInfo const appInfo = {
.pApplicationName = appName,
.applicationVersion = version.GetVkVersion(),
.pEngineName = PROJECT_NAME,
@ -50,7 +48,7 @@ Context::Context(const cstr appName, const Version version, bool enableValidatio
.apiVersion = ASTER_API_VERSION,
};
const vk::DebugUtilsMessengerCreateInfoEXT debugUtilsMessengerCreateInfo = {
vk::DebugUtilsMessengerCreateInfoEXT const debugUtilsMessengerCreateInfo = {
.messageSeverity = vk::DebugUtilsMessageSeverityFlagBitsEXT::eError |
vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning |
vk::DebugUtilsMessageSeverityFlagBitsEXT::eInfo,
@ -61,23 +59,23 @@ Context::Context(const cstr appName, const Version version, bool enableValidatio
.pUserData = nullptr,
};
u32 glfwExtensionCount = 0;
cstr *glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
eastl::fixed_vector<cstr, 3> instanceExtensions(glfwExtensions, glfwExtensions + glfwExtensionCount);
u32 windowExtensionCount = 0;
cstr *windowExtensions = Window::GetInstanceExtensions(&windowExtensionCount);
eastl::fixed_vector<cstr, 3> instanceExtensions(windowExtensions, windowExtensions + windowExtensionCount);
if (enableValidation)
{
instanceExtensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
}
const vk::DynamicLoader dl;
vk::detail::DynamicLoader const dl;
// ReSharper disable once CppInconsistentNaming
const auto vkGetInstanceProcAddr = dl.getProcAddress<PFN_vkGetInstanceProcAddr>("vkGetInstanceProcAddr");
auto const vkGetInstanceProcAddr = dl.getProcAddress<PFN_vkGetInstanceProcAddr>("vkGetInstanceProcAddr");
VULKAN_HPP_DEFAULT_DISPATCHER.init(vkGetInstanceProcAddr);
const auto instanceCreateInfo = vk::InstanceCreateInfo{
auto const instanceCreateInfo = vk::InstanceCreateInfo{
.pNext = enableValidation ? &debugUtilsMessengerCreateInfo : nullptr,
.pApplicationInfo = &appInfo,
.enabledExtensionCount = Cast<u32>(instanceExtensions.size()),
.enabledExtensionCount = static_cast<u32>(instanceExtensions.size()),
.ppEnabledExtensionNames = instanceExtensions.data(),
};
@ -97,8 +95,11 @@ Context::Context(const cstr appName, const Version version, bool enableValidatio
}
}
Context::~Context()
Instance::~Instance()
{
if (!m_Instance)
return;
if (m_DebugMessenger)
{
m_Instance.destroy(m_DebugMessenger, nullptr);
@ -108,14 +109,14 @@ Context::~Context()
DEBUG("Instance destroyed");
}
Context::Context(Context &&other) noexcept
Instance::Instance(Instance &&other) noexcept
: m_Instance(Take(other.m_Instance))
, m_DebugMessenger(Take(other.m_DebugMessenger))
{
}
Context &
Context::operator=(Context &&other) noexcept
Instance &
Instance::operator=(Instance &&other) noexcept
{
if (this == &other)
return *this;

View File

@ -1,15 +1,15 @@
// =============================================
// Aster: physical_device.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/physical_device.h"
#include "core/context.h"
#include "core/instance.h"
#include "core/surface.h"
[[nodiscard]] vk::SurfaceCapabilitiesKHR
GetSurfaceCapabilities(const vk::PhysicalDevice physicalDevice, const vk::SurfaceKHR surface)
GetSurfaceCapabilities(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR const surface)
{
vk::SurfaceCapabilitiesKHR surfaceCapabilities;
@ -21,7 +21,7 @@ GetSurfaceCapabilities(const vk::PhysicalDevice physicalDevice, const vk::Surfac
}
[[nodiscard]] eastl::vector<vk::SurfaceFormatKHR>
GetSurfaceFormats(const vk::PhysicalDevice physicalDevice, const vk::SurfaceKHR surface)
GetSurfaceFormats(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR const surface)
{
// vk::Result::eIncomplete should not occur in this function. The rest are errors. Thus, abort is allowed.
u32 count = 0;
@ -38,7 +38,7 @@ GetSurfaceFormats(const vk::PhysicalDevice physicalDevice, const vk::SurfaceKHR
}
[[nodiscard]] eastl::vector<vk::PresentModeKHR>
GetSurfacePresentModes(const vk::PhysicalDevice physicalDevice, const vk::SurfaceKHR surface)
GetSurfacePresentModes(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR const surface)
{
// vk::Result::eIncomplete should not occur in this function. The rest are errors. Thus, abort is allowed.
u32 count = 0;
@ -55,11 +55,11 @@ GetSurfacePresentModes(const vk::PhysicalDevice physicalDevice, const vk::Surfac
}
[[nodiscard]] bool
GetQueuePresentSupport(const u32 queueFamilyIndex, const vk::SurfaceKHR surface,
const vk::PhysicalDevice physicalDevice)
GetQueuePresentSupport(u32 const queueFamilyIndex, vk::SurfaceKHR const surface,
vk::PhysicalDevice const physicalDevice)
{
b32 supported = false;
const vk::Result result = physicalDevice.getSurfaceSupportKHR(queueFamilyIndex, surface, &supported);
vk::Result const result = physicalDevice.getSurfaceSupportKHR(queueFamilyIndex, surface, &supported);
ERROR_IF(Failed(result), "Could not get queue family surface support. Cause: {}", result)
THEN_ABORT(result);
@ -67,7 +67,7 @@ GetQueuePresentSupport(const u32 queueFamilyIndex, const vk::SurfaceKHR surface,
}
[[nodiscard]] eastl::fixed_vector<vk::QueueFamilyProperties, 32>
GetQueueFamilyProperties(const vk::PhysicalDevice physicalDevice)
GetQueueFamilyProperties(vk::PhysicalDevice const physicalDevice)
{
// Devices rarely have more than 32 queue families, so a fixed vector suffices.
u32 count = 0;
@ -81,7 +81,7 @@ GetQueueFamilyProperties(const vk::PhysicalDevice physicalDevice)
// The returned vector is roughly 384 bytes.
[[nodiscard]] eastl::vector<QueueFamilyInfo>
GetQueueFamilies(const vk::SurfaceKHR surface, const vk::PhysicalDevice physicalDevice)
GetQueueFamilies(vk::SurfaceKHR const surface, vk::PhysicalDevice const physicalDevice)
{
auto queueFamilyProperties = GetQueueFamilyProperties(physicalDevice);
@ -126,7 +126,7 @@ GetQueueFamilies(const vk::SurfaceKHR surface, const vk::PhysicalDevice physical
return queueFamilyInfos;
}
PhysicalDevice::PhysicalDevice(const vk::SurfaceKHR surface, const vk::PhysicalDevice physicalDevice)
PhysicalDevice::PhysicalDevice(vk::SurfaceKHR const surface, vk::PhysicalDevice const physicalDevice)
{
physicalDevice.getProperties(&m_DeviceProperties);
physicalDevice.getFeatures(&m_DeviceFeatures);
@ -139,7 +139,7 @@ PhysicalDevice::PhysicalDevice(const vk::SurfaceKHR surface, const vk::PhysicalD
}
eastl::fixed_vector<vk::PhysicalDevice, 8>
EnumeratePhysicalDevices(const vk::Instance instance)
EnumeratePhysicalDevices(vk::Instance const instance)
{
u32 count = 0;
vk::Result result = instance.enumeratePhysicalDevices(&count, nullptr);
@ -154,11 +154,10 @@ EnumeratePhysicalDevices(const vk::Instance instance)
return physicalDevices;
}
PhysicalDevices::PhysicalDevices(const Surface *surface, const Context *context)
PhysicalDevices::PhysicalDevices(Surface const &surface, Instance const &context)
{
auto physicalDevices = EnumeratePhysicalDevices(context->m_Instance);
for (auto physicalDevice : physicalDevices)
for (auto physicalDevices = EnumeratePhysicalDevices(context.m_Instance); auto physicalDevice : physicalDevices)
{
this->emplace_back(surface->m_Surface, physicalDevice);
this->emplace_back(surface.m_Surface, physicalDevice);
}
}

View File

@ -1,24 +1,28 @@
// =============================================
// Aster: pipeline.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/pipeline.h"
#include "core/device.h"
Pipeline::Pipeline(const Device *device, vk::PipelineLayout layout, vk::Pipeline pipeline,
eastl::vector<vk::DescriptorSetLayout> &&setLayouts)
: m_Device(device)
, m_Layout(layout)
, m_Pipeline(pipeline)
, m_SetLayouts(std::move(setLayouts))
Pipeline::Pipeline(Device const *device, vk::PipelineLayout const layout, vk::Pipeline const pipeline,
eastl::vector<vk::DescriptorSetLayout> &&setLayouts, Kind const kind)
: m_Device{device}
, m_Layout{layout}
, m_Pipeline{pipeline}
, m_SetLayouts{std::move(setLayouts)}
, m_Kind{kind}
{
}
Pipeline::~Pipeline()
{
for (const auto setLayout : m_SetLayouts)
if (!m_Device || !m_Pipeline)
return;
for (auto const setLayout : m_SetLayouts)
{
m_Device->m_Device.destroy(setLayout, nullptr);
}

View File

@ -0,0 +1,40 @@
// =============================================
// Aster: sampler.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/sampler.h"
#include "core/device.h"
Sampler::~Sampler()
{
if (!IsValid())
return;
m_Device->m_Device.destroy(Take(m_Sampler), nullptr);
}
Sampler::Sampler(Device const *device, vk::SamplerCreateInfo const &samplerCreateInfo, cstr name)
{
m_Device = device;
auto const result = device->m_Device.createSampler(&samplerCreateInfo, nullptr, &m_Sampler);
ERROR_IF(Failed(result), "Could not create a sampler {}", name ? name : "<unnamed>") THEN_ABORT(-1);
}
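A hedged example of building the default sampler that CommitManager later consumes (the create-info values are assumptions, not from this diff):
vk::SamplerCreateInfo const samplerCreateInfo = {
    .magFilter = vk::Filter::eLinear,
    .minFilter = vk::Filter::eLinear,
    .mipmapMode = vk::SamplerMipmapMode::eLinear,
    .addressModeU = vk::SamplerAddressMode::eRepeat,
    .addressModeV = vk::SamplerAddressMode::eRepeat,
    .addressModeW = vk::SamplerAddressMode::eRepeat,
};
Sampler defaultSampler{device, samplerCreateInfo, "DefaultSampler"};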
Sampler &
Sampler::operator=(Sampler &&other) noexcept
{
if (this == &other)
return *this;
using std::swap;
swap(m_Device, other.m_Device);
swap(m_Sampler, other.m_Sampler);
return *this;
}
Sampler::Sampler(Sampler &&other) noexcept
: m_Device{other.m_Device}
, m_Sampler{Take(other.m_Sampler)}
{
}

View File

@ -1,20 +1,19 @@
// =============================================
// Aster: surface.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/surface.h"
#include "core/context.h"
#include "core/instance.h"
#include "core/window.h"
Surface::Surface(Context *context, const Window *window, cstr name)
: m_Context(context)
, m_Name(name)
Surface::Surface(Instance &context, Window const &window)
: m_Context(&context)
{
VkSurfaceKHR surface;
auto result = Cast<vk::Result>(
glfwCreateWindowSurface(Cast<VkInstance>(m_Context->m_Instance), window->m_Window, nullptr, &surface));
auto result = static_cast<vk::Result>(
glfwCreateWindowSurface(static_cast<VkInstance>(m_Context->m_Instance), window.m_Window, nullptr, &surface));
ERROR_IF(Failed(result), "Failed to create Surface with {}", result)
THEN_ABORT(result)
ELSE_DEBUG("Surface {} Created", m_Name);
@ -23,14 +22,14 @@ Surface::Surface(Context *context, const Window *window, cstr name)
Surface::~Surface()
{
if (m_Context && m_Surface)
{
m_Context->m_Instance.destroy(m_Surface, nullptr);
DEBUG("Surface Destroyed");
if (!m_Context || !m_Context->m_Instance || !m_Surface)
return;
m_Surface = nullptr;
m_Context = nullptr;
}
m_Context->m_Instance.destroy(m_Surface, nullptr);
DEBUG("Surface Destroyed");
m_Surface = nullptr;
m_Context = nullptr;
}
Surface::Surface(Surface &&other) noexcept

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: swapchain.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/swapchain.h"
@ -11,9 +11,8 @@
[[nodiscard]] vk::Extent2D GetExtent(Size2D size, vk::SurfaceCapabilitiesKHR *surfaceCapabilities);
Swapchain::Swapchain(const Surface *surface, const Device *device, Size2D size, NameString &&name)
: m_Device(device)
, m_Name(std::move(name))
Swapchain::Swapchain(Surface const &surface, Device const &device, Size2D size)
: m_Device(&device)
, m_Format(vk::Format::eUndefined)
{
this->Create(surface, size);
@ -27,11 +26,11 @@ Swapchain::~Swapchain()
Swapchain::Swapchain(Swapchain &&other) noexcept
: m_Device(other.m_Device)
, m_Swapchain(Take(other.m_Swapchain))
, m_Name(std::move(other.m_Name))
, m_Extent(other.m_Extent)
, m_Format(other.m_Format)
, m_Images(std::move(other.m_Images))
, m_ImageViews(std::move(other.m_ImageViews))
, m_ResizeCallbacks(std::move(other.m_ResizeCallbacks))
{
}
@ -42,32 +41,32 @@ Swapchain::operator=(Swapchain &&other) noexcept
return *this;
m_Device = other.m_Device;
m_Swapchain = Take(other.m_Swapchain);
m_Name = std::move(other.m_Name);
m_Extent = other.m_Extent;
m_Format = other.m_Format;
m_Images = std::move(other.m_Images);
m_ImageViews = std::move(other.m_ImageViews);
m_ResizeCallbacks = std::move(other.m_ResizeCallbacks);
return *this;
}
void
Swapchain::Create(const Surface *surface, Size2D size)
Swapchain::Create(Surface const &surface, Size2D size)
{
auto surfaceCapabilities = GetSurfaceCapabilities(m_Device->m_PhysicalDevice, surface->m_Surface);
auto surfaceCapabilities = GetSurfaceCapabilities(m_Device->m_PhysicalDevice, surface.m_Surface);
m_Extent = GetExtent(size, &surfaceCapabilities);
while (m_Extent.width == 0 || m_Extent.height == 0)
{
glfwWaitEvents();
surfaceCapabilities = GetSurfaceCapabilities(m_Device->m_PhysicalDevice, surface->m_Surface);
surfaceCapabilities = GetSurfaceCapabilities(m_Device->m_PhysicalDevice, surface.m_Surface);
m_Extent = GetExtent(size, &surfaceCapabilities);
}
auto surfaceFormats = GetSurfaceFormats(m_Device->m_PhysicalDevice, surface->m_Surface);
auto presentModes = GetSurfacePresentModes(m_Device->m_PhysicalDevice, surface->m_Surface);
auto surfaceFormats = GetSurfaceFormats(m_Device->m_PhysicalDevice, surface.m_Surface);
auto presentModes = GetSurfacePresentModes(m_Device->m_PhysicalDevice, surface.m_Surface);
m_Format = vk::Format::eUndefined;
vk::ColorSpaceKHR swapchainColorSpace = vk::ColorSpaceKHR::eSrgbNonlinear;
auto swapchainColorSpace = vk::ColorSpaceKHR::eSrgbNonlinear;
for (auto [format, colorSpace] : surfaceFormats)
{
if (format == vk::Format::eB8G8R8A8Srgb && colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear)
@ -84,8 +83,8 @@ Swapchain::Create(const Surface *surface, Size2D size)
swapchainColorSpace = colorSpace;
}
vk::PresentModeKHR swapchainPresentMode = vk::PresentModeKHR::eFifo;
for (const auto presentMode : presentModes)
auto swapchainPresentMode = vk::PresentModeKHR::eFifo;
for (auto const presentMode : presentModes)
{
if (presentMode == vk::PresentModeKHR::eMailbox)
{
@ -95,16 +94,14 @@ Swapchain::Create(const Surface *surface, Size2D size)
}
u32 swapchainImageCount = 3;
if (surfaceCapabilities.maxImageCount > 0)
{
swapchainImageCount =
glm::clamp(swapchainImageCount, surfaceCapabilities.minImageCount, surfaceCapabilities.maxImageCount);
}
u32 maxImageCount =
glm::max(swapchainImageCount, glm::max(surfaceCapabilities.maxImageCount, surfaceCapabilities.minImageCount));
swapchainImageCount = glm::clamp(swapchainImageCount, surfaceCapabilities.minImageCount, maxImageCount);
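Two worked cases for the clamp above: with minImageCount = 2 and maxImageCount = 0 (unbounded), the upper bound resolves to max(3, max(0, 2)) = 3 and the request stays 3. With a real cap of maxImageCount = 2, though, it resolves to max(3, 2) = 3 and clamp(3, 2, 3) = 3 over-requests; a guard that keeps both cases correct would be (an assumed fix, not in this diff):
u32 const upperBound = surfaceCapabilities.maxImageCount > 0
    ? surfaceCapabilities.maxImageCount
    : glm::max(swapchainImageCount, surfaceCapabilities.minImageCount);
swapchainImageCount = glm::clamp(swapchainImageCount, surfaceCapabilities.minImageCount, upperBound);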
// TODO: Note that different queues might need the images to be shared.
const vk::SwapchainCreateInfoKHR swapchainCreateInfo = {
.surface = surface->m_Surface,
vk::SwapchainCreateInfoKHR const swapchainCreateInfo = {
.surface = surface.m_Surface,
.minImageCount = swapchainImageCount,
.imageFormat = m_Format,
.imageColorSpace = swapchainColorSpace,
@ -120,28 +117,30 @@ Swapchain::Create(const Surface *surface, Size2D size)
};
vk::Device device = m_Device->m_Device;
NameString name = "Swapchain of ";
name += m_Device->m_Name;
vk::SwapchainKHR swapchain;
vk::Result result = device.createSwapchainKHR(&swapchainCreateInfo, nullptr, &swapchain);
ERROR_IF(Failed(result), "Swapchain {} creation failed. Cause {}", m_Name, result)
ERROR_IF(Failed(result), "'{}' creation failed. Cause {}", name, result)
THEN_ABORT(result)
ELSE_DEBUG("Created Swapchain '{}'", m_Name);
ELSE_DEBUG("Created '{}'", name);
// Irrelevant on the first run. Required for re-creation.
Cleanup();
m_Swapchain = swapchain;
m_Device->SetName(m_Swapchain, m_Name.data());
m_Device->SetName(m_Swapchain, name.data());
result = device.getSwapchainImagesKHR(m_Swapchain, &swapchainImageCount, nullptr);
ERROR_IF(Failed(result), "Failed getting swapchain {}'s images. Cause {}", m_Name, result)
ERROR_IF(Failed(result), "Failed getting {}'s images. Cause {}", name, result)
THEN_ABORT(result);
// Managed by the Swapchain.
m_Images.resize(swapchainImageCount);
m_Images.resize(swapchainImageCount, nullptr);
result = device.getSwapchainImagesKHR(m_Swapchain, &swapchainImageCount, m_Images.data());
ERROR_IF(Failed(result), "Failed getting swapchain {}'s images. Cause {}", m_Name, result)
ERROR_IF(Failed(result), "Failed getting {}'s images. Cause {}", name, result)
THEN_ABORT(result);
vk::ImageViewCreateInfo viewCreateInfo = {
@ -165,7 +164,7 @@ Swapchain::Create(const Surface *surface, Size2D size)
vk::ImageView imageView;
result = device.createImageView(&viewCreateInfo, nullptr, &imageView);
ERROR_IF(Failed(result), "Failed creating swapchain {}'s image view [{}]. Cause {}", m_Name, index, result)
ERROR_IF(Failed(result), "Failed creating {}'s image view [{}]. Cause {}", name, index, result)
THEN_ABORT(result);
m_ImageViews.push_back(imageView);
@ -173,7 +172,7 @@ Swapchain::Create(const Surface *surface, Size2D size)
++index;
}
DEBUG("Swapchain {} Image Views created.", m_Name);
DEBUG("{} Image Views created.", name);
for (auto &callback : m_ResizeCallbacks)
{
@ -184,24 +183,31 @@ Swapchain::Create(const Surface *surface, Size2D size)
void
Swapchain::RegisterResizeCallback(FnResizeCallback &&callback)
{
m_ResizeCallbacks.emplace_back(callback);
m_ResizeCallbacks.emplace_back(std::move(callback));
}
void
Swapchain::Cleanup()
{
if (!m_ImageViews.empty()) // Don't want the condition in the logs.
DEBUG("Swapchain {} Image Views destroyed.", m_Name);
for (const auto imageView : m_ImageViews)
if (!m_Swapchain)
return;
NameString name = "Swapchain of ";
name += m_Device->m_Name;
for (auto const imageView : m_ImageViews)
{
m_Device->m_Device.destroy(imageView, nullptr);
}
if (!m_ImageViews.empty()) // Don't want the condition in the logs.
DEBUG("Swapchain {} Image Views destroyed.", name);
m_ImageViews.clear();
m_Images.clear();
if (m_Swapchain)
{
m_Device->m_Device.destroy(m_Swapchain, nullptr);
m_Swapchain = nullptr;
DEBUG("Swapchain '{}' destroyed.", m_Name);
DEBUG("Swapchain '{}' destroyed.", name);
}
}

View File

@ -1,16 +1,40 @@
// =============================================
// Aster: window.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/window.h"
#include "core/context.h"
#include "core/instance.h"
#include "util/logger.h"
std::atomic_uint64_t Window::m_WindowCount = 0;
std::atomic_bool Window::m_IsGlfwInit = false;
void
Window::SetupLibrary()
{
if (!m_IsGlfwInit)
{
if (!glfwInit())
{
char const *error = nullptr;
auto const code = glfwGetError(&error);
ERROR("GLFW Init failed. Cause: ({}) {}", code, error)
THEN_ABORT(code);
}
m_WindowCount = 0;
m_IsGlfwInit = true;
}
}
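SetupLibrary checks and sets m_IsGlfwInit as two separate atomic operations, so two threads arriving together could both run glfwInit. The call sites look single-threaded, but a std::call_once sketch would close that gap (an assumed hardening, needs <mutex>):
static std::once_flag s_GlfwInitOnce;
std::call_once(s_GlfwInitOnce, [] {
    if (!glfwInit())
    {
        char const *error = nullptr;
        auto const code = glfwGetError(&error);
        ERROR("GLFW Init failed. Cause: ({}) {}", code, error) THEN_ABORT(code);
    }
});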
cstr *
Window::GetInstanceExtensions(u32 *extensionCount)
{
SetupLibrary();
return glfwGetRequiredInstanceExtensions(extensionCount);
}
void
Window::RequestExit() const noexcept
{
@ -18,15 +42,15 @@ Window::RequestExit() const noexcept
}
void
Window::SetWindowSize(const vk::Extent2D &extent) const noexcept
Window::SetWindowSize(vk::Extent2D const &extent) const noexcept
{
SetWindowSize(extent.width, extent.height);
}
void
Window::SetWindowSize(const u32 width, const u32 height) const noexcept
Window::SetWindowSize(u32 const width, u32 const height) const noexcept
{
glfwSetWindowSize(m_Window, Cast<i32>(width), Cast<i32>(height));
glfwSetWindowSize(m_Window, static_cast<i32>(width), static_cast<i32>(height));
}
Size2D
@ -35,25 +59,14 @@ Window::GetSize() const
int width;
int height;
glfwGetFramebufferSize(m_Window, &width, &height);
return {Cast<u32>(width), Cast<u32>(height)};
return {static_cast<u32>(width), static_cast<u32>(height)};
}
Window::Window(const cstr title, Size2D extent, const b8 isFullScreen)
Window::Window(cstr const title, Size2D extent, b8 const isFullScreen)
{
m_Name = title;
if (!m_IsGlfwInit)
{
if (!glfwInit())
{
const char *error = nullptr;
const auto code = glfwGetError(&error);
ERROR("GLFW Init failed. Cause: ({}) {}", code, error)
THEN_ABORT(code);
}
m_WindowCount = 0;
m_IsGlfwInit = true;
}
SetupLibrary();
GLFWmonitor *monitor = glfwGetPrimaryMonitor();
ERROR_IF(!monitor, "No monitor found");
@ -64,22 +77,22 @@ Window::Window(const cstr title, Size2D extent, const b8 isFullScreen)
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
glfwWindowHint(GLFW_CENTER_CURSOR, GLFW_TRUE);
m_Window = glfwCreateWindow(Cast<i32>(extent.m_Width), Cast<i32>(extent.m_Height), m_Name.c_str(),
m_Window = glfwCreateWindow(static_cast<i32>(extent.m_Width), static_cast<i32>(extent.m_Height), m_Name.c_str(),
isFullScreen ? monitor : nullptr, nullptr);
ERROR_IF(m_Window == nullptr, "Window creation failed")
ELSE_DEBUG("Window '{}' created with resolution '{}x{}'", m_Name, extent.m_Width, extent.m_Height);
if (m_Window == nullptr)
{
const char *error = nullptr;
const auto code = glfwGetError(&error);
char const *error = nullptr;
auto const code = glfwGetError(&error);
ERROR("GLFW Window Creation failed. Cause: ({}) {}", code, error)
THEN_ABORT(code);
}
if (isFullScreen == false)
{
glfwSetWindowPos(m_Window, Cast<i32>(windowWidth - extent.m_Width) / 2,
Cast<i32>(windowHeight - extent.m_Height) / 2);
glfwSetWindowPos(m_Window, static_cast<i32>(windowWidth - extent.m_Width) / 2,
static_cast<i32>(windowHeight - extent.m_Height) / 2);
}
glfwSetInputMode(m_Window, GLFW_CURSOR, GLFW_CURSOR_NORMAL);
@ -96,7 +109,7 @@ Window::~Window()
--m_WindowCount;
}
if (m_WindowCount== 0 && m_IsGlfwInit)
if (m_WindowCount == 0 && m_IsGlfwInit)
{
glfwTerminate();
m_IsGlfwInit = false;

View File

@ -4,7 +4,8 @@ cmake_minimum_required(VERSION 3.13)
target_sources(aster_core
PRIVATE
"manager.cpp"
"buffer_manager.cpp"
"image_manager.cpp"
"render_resource_manager.cpp")
"rendering_device.cpp"
"commit_manager.cpp"
"pipeline_helpers.cpp"
"context.cpp"
"sync_server.cpp")

View File

@ -1,48 +0,0 @@
// =============================================
// Aster: buffer_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/buffer_manager.h"
Manager<Buffer> *Manager<Buffer>::m_Instance = nullptr;
using namespace systems;
BufferHandle
BufferManager::CreateStorageBuffer(const usize size, const cstr name)
{
auto [handle, object] = Alloc();
// TODO: Storage and Index usage bits are both set here.
// This is hacky and should be improved.
constexpr vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer |
vk::BufferUsageFlagBits::eShaderDeviceAddress;
constexpr VmaAllocationCreateFlags createFlags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
object->Allocate(m_Device, size, usage, createFlags, memoryUsage, name);
return std::move(handle);
}
Manager<Buffer>::Handle
BufferManager::CreateUniformBuffer(const usize size, const cstr name)
{
auto [handle, object] = Alloc();
constexpr vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eUniformBuffer;
constexpr VmaAllocationCreateFlags createFlags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
object->Allocate(m_Device, size, usage, createFlags, memoryUsage, name);
return std::move(handle);
}
BufferManager::BufferManager(const Device *device, const u32 maxCount, const u8 binding)
: Manager{device, maxCount, binding}
{
}

View File

@ -0,0 +1,239 @@
// =============================================
// Aster: commit_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/commit_manager.h"
#include "EASTL/array.h"
#include "core/device.h"
#include "core/image_view.h"
#include "systems/rendering_device.h"
using namespace systems;
CommitManager *CommitManager::m_Instance = nullptr;
CommitManager::CommitManager(RenderingDevice const *device, u32 const maxBuffers, u32 const maxImages,
u32 const maxStorageImages, Ref<Sampler> defaultSampler)
: m_Device{device}
, m_Buffers{maxBuffers}
, m_Images{maxImages}
, m_StorageImages{maxStorageImages}
, m_DefaultSampler{std::move(defaultSampler)}
{
assert(!m_Instance);
eastl::array poolSizes = {
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageBuffer,
.descriptorCount = maxBuffers,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = maxImages,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageImage,
.descriptorCount = maxStorageImages,
},
};
vk::DescriptorPoolCreateInfo const poolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
.maxSets = 1,
.poolSizeCount = static_cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
AbortIfFailed(device->m_Device->createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));
eastl::array descriptorLayoutBindings = {
vk::DescriptorSetLayoutBinding{
.binding = BUFFER_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.descriptorCount = static_cast<u32>(maxBuffers),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = IMAGE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = static_cast<u32>(maxImages),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = STORAGE_IMAGE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageImage,
.descriptorCount = static_cast<u32>(maxStorageImages),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
};
vk::DescriptorBindingFlags bindingFlags =
vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
eastl::array<vk::DescriptorBindingFlags, descriptorLayoutBindings.size()> layoutBindingFlags;
layoutBindingFlags.fill(bindingFlags);
vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
.bindingCount = static_cast<u32>(layoutBindingFlags.size()),
.pBindingFlags = layoutBindingFlags.data(),
};
static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
vk::DescriptorSetLayoutCreateInfo const descriptorSetLayoutCreateInfo = {
.pNext = &bindingFlagsCreateInfo,
.flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
.bindingCount = static_cast<u32>(descriptorLayoutBindings.size()),
.pBindings = descriptorLayoutBindings.data(),
};
AbortIfFailed(device->m_Device->createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));
// One descriptor set is enough. With update-after-bind it can be updated at any time: writes are allowed until submit, and pending submissions keep the data they were recorded with.
// https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
// https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
vk::DescriptorSetAllocateInfo const descriptorSetAllocateInfo = {
.descriptorPool = m_DescriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &m_SetLayout,
};
AbortIfFailed(device->m_Device->allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));
device->SetName(m_SetLayout, "Bindless Layout");
device->SetName(m_DescriptorPool, "Bindless Pool");
device->SetName(m_DescriptorSet, "Bindless Set");
m_Instance = this;
}
CommitManager::~CommitManager()
{
m_Device->m_Device->destroy(m_SetLayout, nullptr);
m_Device->m_Device->destroy(m_DescriptorPool, nullptr);
#if !defined(ASTER_NDEBUG)
u32 bufferCount = 0;
for (auto const &entry : m_Buffers.m_Data)
{
bufferCount += entry.m_CommitCount;
}
u32 imageCount = 0;
for (auto const &entry : m_Images.m_Data)
{
imageCount += entry.m_CommitCount;
}
if (bufferCount > 0 || imageCount > 0)
{
WARN("Committed resources at destruction. Buffers: {}, Images: {}", bufferCount, imageCount);
}
#endif
}
ResId<Buffer>
CommitManager::CommitBuffer(Ref<Buffer> const &buffer)
{
auto [commit, isNew] = m_Buffers.Create(buffer);
if (!isNew)
return commit;
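// Note: the write below stores a pointer into m_WriteInfos; this assumes the
// container keeps element addresses stable until Update() flushes the queue
// (e.g. a deque, or a vector with capacity reserved up front).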
m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
.buffer = buffer->m_Buffer,
.offset = 0,
.range = buffer->m_Size,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = BUFFER_BINDING_INDEX,
.dstArrayElement = commit.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.pBufferInfo = &m_WriteInfos.back().uBufferInfo,
});
return commit;
}
ResId<StorageImageView>
CommitManager::CommitStorageImage(Ref<StorageImageView> const &image)
{
auto [commit, isNew] = m_StorageImages.Create(image);
if (!isNew)
return commit;
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = nullptr,
.imageView = image->m_View,
.imageLayout = vk::ImageLayout::eGeneral,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = STORAGE_IMAGE_BINDING_INDEX,
.dstArrayElement = commit.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageImage,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
return commit;
}
ResId<TextureView>
CommitManager::CommitTexture(Ref<TextureView> const &handle)
{
return CommitTexture(handle, m_DefaultSampler);
}
ResId<TextureView>
CommitManager::CommitTexture(Ref<TextureView> const &image, Ref<Sampler> const &sampler)
{
auto [commit, isNew] = m_Images.Create(image);
if (!isNew)
return commit;
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = sampler->m_Sampler,
.imageView = image->m_View,
.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = IMAGE_BINDING_INDEX,
.dstArrayElement = commit.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
return commit;
}
CommitManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo const &info)
: uBufferInfo{info}
{
}
CommitManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo const &info)
: uImageInfo{info}
{
}
CommitManager::WriteInfo::WriteInfo(vk::BufferView const &info)
: uBufferView{info}
{
}
void
CommitManager::Update()
{
// Descriptor Updates
if (!m_Writes.empty())
{
m_Device->m_Device->updateDescriptorSets(static_cast<u32>(m_Writes.size()), m_Writes.data(), 0, nullptr);
m_Writes.clear();
m_WriteInfos.clear();
}
m_Buffers.Update();
m_Images.Update();
}
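A minimal usage sketch of the flow above: commit resources to receive stable bindless indices, then flush all queued descriptor writes once before recording. How the indices reach the shader (push constants here) is an assumption, not shown in this diff.
void
PrepareFrameResources(CommitManager &commits, Ref<Buffer> const &lights, Ref<TextureView> const &albedo)
{
// Re-committing an already-committed resource returns the same index (isNew == false).
ResId<Buffer> const lightsId = commits.CommitBuffer(lights);
ResId<TextureView> const albedoId = commits.CommitTexture(albedo); // default-sampler overload
// One updateDescriptorSets call flushes every queued write.
commits.Update();
// lightsId.m_Index and albedoId.m_Index can now be passed to shaders,
// e.g. in a push-constant block, to index the bindless arrays.
}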

View File

@ -0,0 +1,506 @@
// =============================================
// Aster: context.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/systems/context.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
constexpr static u32
GetFormatSize(vk::Format const format)
{
switch (format)
{
case vk::Format::eUndefined:
return 0;
case vk::Format::eR8Unorm:
case vk::Format::eR8Snorm:
case vk::Format::eR8Uscaled:
case vk::Format::eR8Sscaled:
case vk::Format::eR8Uint:
case vk::Format::eR8Sint:
case vk::Format::eR8Srgb:
return 1;
case vk::Format::eR8G8Unorm:
case vk::Format::eR8G8Snorm:
case vk::Format::eR8G8Uscaled:
case vk::Format::eR8G8Sscaled:
case vk::Format::eR8G8Uint:
case vk::Format::eR8G8Sint:
case vk::Format::eR8G8Srgb:
return 2;
case vk::Format::eR8G8B8Unorm:
case vk::Format::eR8G8B8Snorm:
case vk::Format::eR8G8B8Uscaled:
case vk::Format::eR8G8B8Sscaled:
case vk::Format::eR8G8B8Uint:
case vk::Format::eR8G8B8Sint:
case vk::Format::eR8G8B8Srgb:
case vk::Format::eB8G8R8Unorm:
case vk::Format::eB8G8R8Snorm:
case vk::Format::eB8G8R8Uscaled:
case vk::Format::eB8G8R8Sscaled:
case vk::Format::eB8G8R8Uint:
case vk::Format::eB8G8R8Sint:
case vk::Format::eB8G8R8Srgb:
return 3;
case vk::Format::eR8G8B8A8Unorm:
case vk::Format::eR8G8B8A8Snorm:
case vk::Format::eR8G8B8A8Uscaled:
case vk::Format::eR8G8B8A8Sscaled:
case vk::Format::eR8G8B8A8Uint:
case vk::Format::eR8G8B8A8Sint:
case vk::Format::eR8G8B8A8Srgb:
case vk::Format::eB8G8R8A8Unorm:
case vk::Format::eB8G8R8A8Snorm:
case vk::Format::eB8G8R8A8Uscaled:
case vk::Format::eB8G8R8A8Sscaled:
case vk::Format::eB8G8R8A8Uint:
case vk::Format::eB8G8R8A8Sint:
case vk::Format::eB8G8R8A8Srgb:
return 4;
case vk::Format::eR16Unorm:
case vk::Format::eR16Snorm:
case vk::Format::eR16Uscaled:
case vk::Format::eR16Sscaled:
case vk::Format::eR16Uint:
case vk::Format::eR16Sint:
case vk::Format::eR16Sfloat:
return 2;
case vk::Format::eR16G16Unorm:
case vk::Format::eR16G16Snorm:
case vk::Format::eR16G16Uscaled:
case vk::Format::eR16G16Sscaled:
case vk::Format::eR16G16Uint:
case vk::Format::eR16G16Sint:
case vk::Format::eR16G16Sfloat:
return 4;
case vk::Format::eR16G16B16Unorm:
case vk::Format::eR16G16B16Snorm:
case vk::Format::eR16G16B16Uscaled:
case vk::Format::eR16G16B16Sscaled:
case vk::Format::eR16G16B16Uint:
case vk::Format::eR16G16B16Sint:
case vk::Format::eR16G16B16Sfloat:
return 6;
case vk::Format::eR16G16B16A16Unorm:
case vk::Format::eR16G16B16A16Snorm:
case vk::Format::eR16G16B16A16Uscaled:
case vk::Format::eR16G16B16A16Sscaled:
case vk::Format::eR16G16B16A16Uint:
case vk::Format::eR16G16B16A16Sint:
case vk::Format::eR16G16B16A16Sfloat:
return 8;
case vk::Format::eR32Uint:
case vk::Format::eR32Sint:
case vk::Format::eR32Sfloat:
return 4;
case vk::Format::eR32G32Uint:
case vk::Format::eR32G32Sint:
case vk::Format::eR32G32Sfloat:
return 8;
case vk::Format::eR32G32B32Uint:
case vk::Format::eR32G32B32Sint:
case vk::Format::eR32G32B32Sfloat:
return 12;
case vk::Format::eR32G32B32A32Uint:
case vk::Format::eR32G32B32A32Sint:
case vk::Format::eR32G32B32A32Sfloat:
return 16;
case vk::Format::eD16Unorm:
return 2;
case vk::Format::eD32Sfloat:
return 4;
case vk::Format::eS8Uint:
return 1;
case vk::Format::eD16UnormS8Uint:
return 3; // 2-byte depth + 1-byte stencil
case vk::Format::eD24UnormS8Uint:
return 4;
case vk::Format::eD32SfloatS8Uint:
return 5;
default:
TODO("Esoteric Formats");
}
return 0;
}
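As a worked example of the mapping above (it feeds the upload-size check further down):
// A 256x256x1 image in vk::Format::eR8G8B8A8Unorm is expected to carry
// 256 * 256 * 1 * GetFormatSize(eR8G8B8A8Unorm) = 65536 * 4 = 262144 bytes.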
void
systems::Context::KeepAlive(Ref<Buffer> const &buffer)
{
assert(m_Pool);
m_Pool->KeepAlive(buffer);
}
void
systems::Context::KeepAlive(Ref<Image> const &image)
{
assert(m_Pool);
m_Pool->KeepAlive(image);
}
void
systems::Context::KeepAlive(Ref<ImageView> const &view)
{
assert(m_Pool);
m_Pool->KeepAlive(view);
}
void
systems::Context::Dependency(vk::DependencyInfo const &dependencyInfo)
{
m_Cmd.pipelineBarrier2(&dependencyInfo);
}
void
systems::Context::Begin()
{
vk::CommandBufferBeginInfo commandBufferBeginInfo = {
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
};
auto result = m_Cmd.begin(&commandBufferBeginInfo);
ERROR_IF(Failed(result), "Could not begin context") THEN_ABORT(result);
}
// Release builds inline these as no-ops.
#if !defined(ASTER_NDEBUG)
void
systems::Context::BeginDebugRegion(cstr const name, vec4 const color)
{
vk::DebugUtilsLabelEXT const label = {
.pLabelName = name,
.color = std::array{color.r, color.g, color.b, color.a},
};
m_Cmd.beginDebugUtilsLabelEXT(&label);
}
void
systems::Context::EndDebugRegion()
{
m_Cmd.endDebugUtilsLabelEXT();
}
#endif
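For example, a pass can be wrapped as follows (ctx is any Context; per the guard above, the calls vanish in release builds):
ctx.BeginDebugRegion("Shadow Pass", vec4{0.2f, 0.2f, 0.8f, 1.0f});
// ... record the pass ...
ctx.EndDebugRegion();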
void
systems::Context::End()
{
auto result = m_Cmd.end();
ERROR_IF(Failed(result), "Could not end context") THEN_ABORT(result);
}
void
systems::ComputeContext::Dispatch(Pipeline const &pipeline, u32 x, u32 y, u32 z, usize size, void *data)
{
BindPipeline(pipeline);
PushConstantBlock(0, size, data);
m_Cmd.dispatch(x, y, z);
}
void
systems::ComputeContext::BindPipeline(Pipeline const &pipeline)
{
auto bindPoint = vk::PipelineBindPoint::eGraphics;
switch (pipeline.m_Kind)
{
case Pipeline::Kind::eGraphics:
bindPoint = vk::PipelineBindPoint::eGraphics;
break;
case Pipeline::Kind::eCompute:
bindPoint = vk::PipelineBindPoint::eCompute;
break;
default:
UNREACHABLE("No additional bind points");
}
m_Cmd.bindPipeline(bindPoint, pipeline.m_Pipeline);
// TODO: Maybe find a smarter place to host this.
if (CommitManager::IsInit())
{
m_Cmd.bindDescriptorSets(bindPoint, pipeline.m_Layout, 0, 1, &CommitManager::Instance().GetDescriptorSet(), 0,
nullptr);
}
m_PipelineInUse = &pipeline;
}
void
systems::GraphicsContext::SetViewport(vk::Viewport const &viewport)
{
m_Cmd.setViewport(0, 1, &viewport);
}
void
systems::GraphicsContext::BindVertexBuffer(Ref<VertexBuffer> const &vertexBuffer)
{
constexpr vk::DeviceSize offset = 0;
m_Cmd.bindVertexBuffers(0, 1, &vertexBuffer->m_Buffer, &offset);
}
void
systems::GraphicsContext::BindIndexBuffer(Ref<IndexBuffer> const &indexBuffer)
{
m_Cmd.bindIndexBuffer(indexBuffer->m_Buffer, 0, vk::IndexType::eUint32);
}
void
systems::GraphicsContext::Draw(usize const vertexCount)
{
m_Cmd.draw(static_cast<u32>(vertexCount), 1, 0, 0);
}
void
systems::GraphicsContext::DrawIndexed(usize indexCount)
{
m_Cmd.drawIndexed(static_cast<u32>(indexCount), 1, 0, 0, 0);
}
void
systems::GraphicsContext::DrawIndexed(usize const indexCount, usize const firstIndex, usize const firstVertex)
{
m_Cmd.drawIndexed(static_cast<u32>(indexCount), 1, static_cast<u32>(firstIndex), static_cast<i32>(firstVertex), 0);
}
void
systems::GraphicsContext::BeginRendering(vk::RenderingInfo const &renderingInfo)
{
m_Cmd.beginRendering(&renderingInfo);
m_Cmd.setScissor(0, 1, &renderingInfo.renderArea);
}
void
systems::GraphicsContext::EndRendering()
{
m_Cmd.endRendering();
}
void
systems::TransferContext::UploadTexture(Ref<Image> const &image, eastl::span<u8> const &data)
{
ERROR_IF(not(image and image->IsValid()), "Invalid image");
auto [w, h, d] = image->m_Extent;
auto formatSize = GetFormatSize(image->m_Format);
auto expectedByteSize = static_cast<u64>(w) * static_cast<u64>(h) * static_cast<u64>(d) * formatSize;
ERROR_IF(expectedByteSize != data.size_bytes(), "Mismatch in data size {} vs image size {} ({}x{}x{}x{})",
data.size_bytes(), expectedByteSize, w, h, d, formatSize);
Ref<StagingBuffer> const stagingBuffer = m_Pool->GetDevice().CreateStagingBuffer(data.size_bytes());
stagingBuffer->Write(0, data.size_bytes(), data.data());
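// Note: bufferRowLength/bufferImageHeight equal to the image extent marks the
// staging data as tightly packed; a value of 0 would mean the same in Vulkan.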
vk::BufferImageCopy const bufferImageCopy = {
.bufferOffset = 0,
.bufferRowLength = w,
.bufferImageHeight = h,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = image->m_Extent,
};
m_Cmd.copyBufferToImage(stagingBuffer->m_Buffer, image->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
&bufferImageCopy);
KeepAlive(stagingBuffer);
KeepAlive(image);
}
void
systems::TransferContext::UploadBuffer(Ref<Buffer> const &buffer, usize size, void const *data)
{
ERROR_IF(not(buffer and buffer->IsValid()), "Invalid buffer");
auto expectedByteSize = buffer->m_Size;
ERROR_IF(expectedByteSize != size, "Mismatch in data size {} vs buffer size {}", size, expectedByteSize);
Ref<StagingBuffer> const stagingBuffer = m_Pool->GetDevice().CreateStagingBuffer(size);
stagingBuffer->Write(0, size, data);
vk::BufferCopy const bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = expectedByteSize};
m_Cmd.copyBuffer(stagingBuffer->m_Buffer, buffer->m_Buffer, 1, &bufferCopy);
KeepAlive(stagingBuffer);
KeepAlive(buffer);
}
void
systems::TransferContext::Blit(vk::BlitImageInfo2 const &mipBlitInfo)
{
m_Cmd.blitImage2(&mipBlitInfo);
}
systems::TransferContext::TransferContext(TransferContext &&other) noexcept
: Context{std::move(other)}
{
}
systems::TransferContext &
systems::TransferContext::operator=(TransferContext &&other) noexcept
{
if (this == &other)
return *this;
Context::operator=(std::move(other));
return *this;
}
void
systems::ComputeContext::PushConstantBlock(usize const offset, usize const size, void const *data)
{
assert(m_PipelineInUse);
vk::ShaderStageFlags stage;
switch (m_PipelineInUse->m_Kind)
{
case Pipeline::Kind::eGraphics:
stage = vk::ShaderStageFlagBits::eAll;
break;
case Pipeline::Kind::eCompute:
stage = vk::ShaderStageFlagBits::eCompute;
break;
}
m_Cmd.pushConstants(m_PipelineInUse->m_Layout, stage, static_cast<u32>(offset), static_cast<u32>(size), data);
}
using namespace systems::_internal;
ContextPool::ContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
: m_Device{&device}
, m_BuffersAllocated{0}
, m_ExtraData{0}
, m_ManagedBy{managedBy}
, m_ResetCallback{}
{
vk::CommandPoolCreateInfo const commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueFamilyIndex,
};
AbortIfFailed(device.m_Device->createCommandPool(&commandPoolCreateInfo, nullptr, &m_Pool));
}
ContextPool::ContextPool(ContextPool &&other) noexcept
: m_Device{other.m_Device}
, m_Pool{Take(other.m_Pool)}
, m_CommandBuffers{std::move(other.m_CommandBuffers)}
, m_BuffersAllocated{other.m_BuffersAllocated}
, m_ExtraData{other.m_ExtraData}
, m_ManagedBy{other.m_ManagedBy}
, m_OwnedBuffers{std::move(other.m_OwnedBuffers)}
, m_OwnedImages{std::move(other.m_OwnedImages)}
, m_OwnedImageViews{std::move(other.m_OwnedImageViews)}
, m_ResetCallback{std::move(other.m_ResetCallback)}
{
}
ContextPool &
ContextPool::operator=(ContextPool &&other) noexcept
{
if (this == &other)
return *this;
using eastl::swap;
swap(m_Device, other.m_Device);
swap(m_Pool, other.m_Pool);
swap(m_CommandBuffers, other.m_CommandBuffers);
swap(m_ExtraData, other.m_ExtraData);
swap(m_ManagedBy, other.m_ManagedBy);
swap(m_BuffersAllocated, other.m_BuffersAllocated);
swap(m_OwnedBuffers, other.m_OwnedBuffers);
swap(m_OwnedImages, other.m_OwnedImages);
swap(m_OwnedImageViews, other.m_OwnedImageViews);
swap(m_ResetCallback, other.m_ResetCallback);
return *this;
}
ContextPool::~ContextPool()
{
if (!m_Pool)
return;
m_Device->m_Device->destroy(Take(m_Pool), nullptr);
}
void
ContextPool::KeepAlive(Ref<Buffer> const &buffer)
{
m_OwnedBuffers.push_back(buffer);
}
void
ContextPool::KeepAlive(Ref<Image> const &image)
{
m_OwnedImages.push_back(image);
}
void
ContextPool::KeepAlive(Ref<ImageView> const &view)
{
m_OwnedImageViews.push_back(view);
}
vk::CommandBuffer
ContextPool::AllocateCommandBuffer()
{
// Reuse an existing command buffer if one is available.
if (m_BuffersAllocated < m_CommandBuffers.size())
{
return m_CommandBuffers[m_BuffersAllocated++];
}
// Allocate New Buffer.
vk::CommandBufferAllocateInfo const allocateInfo = {
.commandPool = m_Pool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
vk::CommandBuffer &cmd = m_CommandBuffers.emplace_back();
AbortIfFailed(m_Device->m_Device->allocateCommandBuffers(&allocateInfo, &cmd));
++m_BuffersAllocated;
return cmd;
}
systems::Context
ContextPool::CreateContext()
{
return Context{*this, AllocateCommandBuffer()};
}
void
ContextPool::Reset()
{
assert(m_Pool);
AbortIfFailed(m_Device->m_Device->resetCommandPool(m_Pool, {}));
m_BuffersAllocated = 0;
m_OwnedBuffers.clear();
m_OwnedImages.clear();
m_OwnedImageViews.clear();
}
systems::TransferContext
TransferContextPool::CreateTransferContext()
{
return TransferContext{*this, AllocateCommandBuffer()};
}
systems::ComputeContext
ComputeContextPool::CreateComputeContext()
{
return ComputeContext{*this, AllocateCommandBuffer()};
}
systems::GraphicsContext
GraphicsContextPool::CreateGraphicsContext()
{
return GraphicsContext{*this, AllocateCommandBuffer()};
}
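A hedged sketch of how the pools and contexts above compose in a frame (namespace qualifiers elided). The submit plumbing and the pool's reuse guarantee are assumptions; only calls shown in this diff are used.
void
RecordFrame(GraphicsContextPool &pool, vk::RenderingInfo const &renderingInfo, Pipeline const &pipeline)
{
pool.Reset(); // assumes the GPU has finished with this pool (see SyncServer below)
GraphicsContext ctx = pool.CreateGraphicsContext();
ctx.Begin();
ctx.BeginRendering(renderingInfo); // also sets the scissor to renderArea
ctx.BindPipeline(pipeline); // binds the bindless set when CommitManager is live
ctx.Draw(3); // e.g. a fullscreen triangle
ctx.EndRendering();
ctx.End();
// Submission and its receipt-based synchronization are handled elsewhere.
}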

View File

@ -1,316 +0,0 @@
// =============================================
// Aster: image_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/image_manager.h"
#include "core/device.h"
Manager<Image> *Manager<Image>::m_Instance = nullptr;
using namespace systems;
vk::ImageCreateInfo ToImageCreateInfo(const Texture2DCreateInfo &createInfo);
vk::ImageCreateInfo ToImageCreateInfo(const TextureCubeCreateInfo &createInfo);
vk::ImageCreateInfo ToImageCreateInfo(const AttachmentCreateInfo &createInfo);
vk::ImageCreateInfo ToImageCreateInfo(const DepthStencilImageCreateInfo &createInfo);
namespace usage_flags
{
constexpr vk::ImageUsageFlags MIPMAP = vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
constexpr vk::ImageUsageFlags SAMPLE = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
constexpr vk::ImageUsageFlags STORAGE =
vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eTransferSrc;
constexpr vk::ImageUsageFlags COLOR_ATTACHMENT =
vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc;
constexpr vk::ImageUsageFlags DEPTH_STENCIL_ATTACHMENT = vk::ImageUsageFlagBits::eDepthStencilAttachment;
} // namespace usage_flags
ImageHandle
ImageManager::CreateTexture2D(const Texture2DCreateInfo &createInfo)
{
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
vk::ImageCreateInfo imageCreateInfo = ToImageCreateInfo(createInfo);
auto result = Cast<vk::Result>(vmaCreateImage(m_Device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", createInfo.m_Name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageCreateInfo.format,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = imageCreateInfo.mipLevels,
.baseArrayLayer = 0,
.layerCount = imageCreateInfo.arrayLayers,
},
};
result = m_Device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", createInfo.m_Name, result)
THEN_ABORT(result);
auto [handle, object] = Alloc();
object->m_Image = image;
object->m_View = view;
object->m_Allocation = allocation;
object->m_Extent = imageCreateInfo.extent;
object->m_Flags_ = Image::OWNED_BIT | Image::VALID_BIT;
object->m_LayerCount = Cast<u8>(imageCreateInfo.arrayLayers);
object->m_MipLevels = Cast<u8>(imageCreateInfo.mipLevels);
m_Device->SetName(object->m_Image, createInfo.m_Name);
return handle;
}
ImageHandle
ImageManager::CreateTextureCube(const TextureCubeCreateInfo &createInfo)
{
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
vk::ImageCreateInfo imageCreateInfo = ToImageCreateInfo(createInfo);
auto result = Cast<vk::Result>(vmaCreateImage(m_Device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", createInfo.m_Name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::eCube,
.format = imageCreateInfo.format,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = imageCreateInfo.mipLevels,
.baseArrayLayer = 0,
.layerCount = imageCreateInfo.arrayLayers,
},
};
result = m_Device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", createInfo.m_Name, result)
THEN_ABORT(result);
auto [handle, object] = Alloc();
object->m_Image = image;
object->m_View = view;
object->m_Allocation = allocation;
object->m_Extent = imageCreateInfo.extent;
object->m_Flags_ = Image::OWNED_BIT | Image::VALID_BIT;
object->m_LayerCount = Cast<u8>(imageCreateInfo.arrayLayers);
object->m_MipLevels = Cast<u8>(imageCreateInfo.mipLevels);
m_Device->SetName(object->m_Image, createInfo.m_Name);
return handle;
}
ImageHandle
ImageManager::CreateAttachment(const AttachmentCreateInfo &createInfo)
{
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
vk::ImageCreateInfo imageCreateInfo = ToImageCreateInfo(createInfo);
auto result = Cast<vk::Result>(vmaCreateImage(m_Device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", createInfo.m_Name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageCreateInfo.format,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = imageCreateInfo.mipLevels,
.baseArrayLayer = 0,
.layerCount = imageCreateInfo.arrayLayers,
},
};
result = m_Device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", createInfo.m_Name, result)
THEN_ABORT(result);
auto [handle, object] = Alloc();
object->m_Image = image;
object->m_View = view;
object->m_Allocation = allocation;
object->m_Extent = imageCreateInfo.extent;
object->m_Flags_ = Image::OWNED_BIT | Image::VALID_BIT;
object->m_LayerCount = Cast<u8>(imageCreateInfo.arrayLayers);
object->m_MipLevels = Cast<u8>(imageCreateInfo.mipLevels);
m_Device->SetName(object->m_Image, createInfo.m_Name);
return handle;
}
ImageHandle
ImageManager::CreateDepthStencilImage(const DepthStencilImageCreateInfo &createInfo)
{
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
vk::ImageCreateInfo imageCreateInfo = ToImageCreateInfo(createInfo);
auto result = Cast<vk::Result>(vmaCreateImage(m_Device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", createInfo.m_Name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageCreateInfo.format,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil,
.baseMipLevel = 0,
.levelCount = imageCreateInfo.mipLevels,
.baseArrayLayer = 0,
.layerCount = imageCreateInfo.arrayLayers,
},
};
result = m_Device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", createInfo.m_Name, result)
THEN_ABORT(result);
auto [handle, object] = Alloc();
object->m_Image = image;
object->m_View = view;
object->m_Allocation = allocation;
object->m_Extent = imageCreateInfo.extent;
object->m_Flags_ = Image::OWNED_BIT | Image::VALID_BIT;
object->m_LayerCount = Cast<u8>(imageCreateInfo.arrayLayers);
object->m_MipLevels = Cast<u8>(imageCreateInfo.mipLevels);
m_Device->SetName(object->m_Image, createInfo.m_Name);
return handle;
}
vk::ImageCreateInfo
ToImageCreateInfo(const Texture2DCreateInfo &createInfo)
{
auto &[format, extent, name, isSampled, isMipMapped, isStorage] = createInfo;
WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.height), "Image {2} is {0}x{1} (Non Power of Two)",
extent.width, extent.height, name ? name : "<unnamed>");
const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(eastl::max(extent.width, extent.height)))) : 1;
auto usage = vk::ImageUsageFlags{};
if (isSampled)
usage |= usage_flags::SAMPLE;
if (isMipMapped)
usage |= usage_flags::MIPMAP;
if (isStorage)
usage |= usage_flags::STORAGE;
return {
.imageType = vk::ImageType::e2D,
.format = format,
.extent = ToExtent3D(extent, 1),
.mipLevels = mipLevels,
.arrayLayers = 1,
.usage = usage,
};
}
vk::ImageCreateInfo
ToImageCreateInfo(const TextureCubeCreateInfo &createInfo)
{
auto &[format, side, name, isSampled, isMipMapped, isStorage] = createInfo;
WARN_IF(!IsPowerOfTwo(side), "ImageCube {1} is {0}x{0} (Non Power of Two)", side, name ? name : "<unnamed>");
const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(side))) : 1;
auto usage = vk::ImageUsageFlags{};
if (isSampled)
usage |= usage_flags::SAMPLE;
if (isMipMapped)
usage |= usage_flags::MIPMAP;
if (isStorage)
usage |= usage_flags::STORAGE;
return {
.flags = vk::ImageCreateFlagBits::eCubeCompatible,
.imageType = vk::ImageType::e2D,
.format = format,
.extent = {side, side, 1},
.mipLevels = mipLevels,
.arrayLayers = 6,
.usage = usage,
};
}
vk::ImageCreateInfo
ToImageCreateInfo(const AttachmentCreateInfo &createInfo)
{
auto &[format, extent, name] = createInfo;
constexpr auto usage = usage_flags::COLOR_ATTACHMENT;
return {
.imageType = vk::ImageType::e2D,
.format = format,
.extent = ToExtent3D(extent, 1),
.mipLevels = 1,
.arrayLayers = 1,
.usage = usage,
};
}
vk::ImageCreateInfo
ToImageCreateInfo(const DepthStencilImageCreateInfo &createInfo)
{
auto &[extent, name] = createInfo;
constexpr vk::Format format = vk::Format::eD24UnormS8Uint;
constexpr auto usage = usage_flags::DEPTH_STENCIL_ATTACHMENT;
return {
.imageType = vk::ImageType::e2D,
.format = format,
.extent = ToExtent3D(extent, 1),
.mipLevels = 1,
.arrayLayers = 1,
.usage = usage,
};
}
ImageManager::ImageManager(const Device *device, const u32 maxCount, const u8 binding)
: Manager{device, maxCount, binding}
{
}

View File

@ -1,6 +0,0 @@
// =============================================
// Aster: manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/manager.h"

View File

@ -0,0 +1,374 @@
// =============================================
// Aster: pipeline_helpers.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/rendering_device.h"
#include <aster/systems/pipeline_helpers.h>
using namespace systems::_internal;
struct WhatVisitor
{
std::string
operator()(std::monostate) const
{
return "No Error";
}
std::string
operator()(vk::Result result) const
{
return fmt::format("Vulkan Error: {}", result);
}
std::string
operator()(SlangResult result) const
{
return fmt::format("Slang Error: {}", result);
}
};
struct ValueVisitor
{
i32
operator()(std::monostate) const
{
return 0;
}
i32
operator()(vk::Result result) const
{
return static_cast<i32>(result);
}
i32
operator()(SlangResult result) const
{
return result;
}
};
i32
systems::PipelineCreationError::Value()
{
return std::visit(ValueVisitor{}, m_Data);
}
systems::PipelineCreationError::PipelineCreationError(vk::Result res)
: m_Data{res}
{
}
systems::PipelineCreationError::PipelineCreationError(SlangResult res)
: m_Data{res}
{
}
systems::PipelineCreationError::PipelineCreationError()
: m_Data{std::monostate{}}
{
}
systems::PipelineCreationError::operator bool() const
{
return not std::holds_alternative<std::monostate>(m_Data);
}
std::string
systems::PipelineCreationError::What()
{
return std::visit(WhatVisitor{}, m_Data);
}
vk::ShaderStageFlagBits
systems::SlangToVulkanShaderStage(SlangStage const stage)
{
switch (stage)
{
case SLANG_STAGE_VERTEX:
return vk::ShaderStageFlagBits::eVertex;
case SLANG_STAGE_HULL:
return vk::ShaderStageFlagBits::eTessellationControl;
case SLANG_STAGE_DOMAIN:
return vk::ShaderStageFlagBits::eTessellationEvaluation;
case SLANG_STAGE_GEOMETRY:
return vk::ShaderStageFlagBits::eGeometry;
case SLANG_STAGE_FRAGMENT:
return vk::ShaderStageFlagBits::eFragment;
case SLANG_STAGE_COMPUTE:
return vk::ShaderStageFlagBits::eCompute;
case SLANG_STAGE_RAY_GENERATION:
return vk::ShaderStageFlagBits::eRaygenKHR;
case SLANG_STAGE_INTERSECTION:
return vk::ShaderStageFlagBits::eIntersectionKHR;
case SLANG_STAGE_ANY_HIT:
return vk::ShaderStageFlagBits::eAnyHitKHR;
case SLANG_STAGE_CLOSEST_HIT:
return vk::ShaderStageFlagBits::eClosestHitKHR;
case SLANG_STAGE_MISS:
return vk::ShaderStageFlagBits::eMissKHR;
case SLANG_STAGE_CALLABLE:
return vk::ShaderStageFlagBits::eCallableKHR;
case SLANG_STAGE_MESH:
return vk::ShaderStageFlagBits::eMeshEXT;
case SLANG_STAGE_AMPLIFICATION:
return vk::ShaderStageFlagBits::eTaskEXT;
case SLANG_STAGE_NONE:
case SLANG_STAGE_COUNT:
UNREACHABLE();
return {};
}
UNREACHABLE();
return {};
}
PipelineLayoutBuilder::PipelineLayoutBuilder(RenderingDevice *device, vk::DescriptorSetLayout bindlessLayout)
: m_Device{device}
, m_DescriptorSetLayouts{bindlessLayout} // If null, it is filtered out during Build().
{
}
vk::PipelineLayout
PipelineLayoutBuilder::Build()
{
vk::PipelineLayout pipelineLayout;
eastl::vector<vk::DescriptorSetLayout> filteredDescriptorSetLayouts;
filteredDescriptorSetLayouts.reserve(m_DescriptorSetLayouts.size());
for (auto dsl : m_DescriptorSetLayouts)
{
if (dsl)
{
filteredDescriptorSetLayouts.push_back(dsl);
}
}
vk::PipelineLayoutCreateInfo const createInfo = {
.setLayoutCount = static_cast<u32>(filteredDescriptorSetLayouts.size()),
.pSetLayouts = filteredDescriptorSetLayouts.data(),
.pushConstantRangeCount = static_cast<u32>(m_PushConstants.size()),
.pPushConstantRanges = m_PushConstants.data(),
};
AbortIfFailed(m_Device->m_Device->createPipelineLayout(&createInfo, nullptr, &pipelineLayout));
return pipelineLayout;
}
vk::DescriptorSetLayout
PipelineLayoutBuilder::CreateDescriptorSetLayout(vk::DescriptorSetLayoutCreateInfo const &createInfo) const
{
vk::DescriptorSetLayout dsl;
// Failure cases are OOM errors; no recovery.
AbortIfFailed(m_Device->m_Device->createDescriptorSetLayout(&createInfo, nullptr, &dsl));
return dsl;
}
void
PipelineLayoutBuilder::AddDescriptorSetForParameterBlock(slang::TypeLayoutReflection *layout)
{
DescriptorLayoutBuilder descriptorLayoutBuilder{this};
descriptorLayoutBuilder.AddRangesForParamBlockElement(layout->getElementTypeLayout());
descriptorLayoutBuilder.Build();
}
void
PipelineLayoutBuilder::AddPushConstantRangeForConstantBuffer(slang::TypeLayoutReflection *layout)
{
auto const elementTypeLayout = layout->getElementTypeLayout();
auto const elementSize = elementTypeLayout->getSize();
if (elementSize == 0)
return;
m_PushConstants.push_back({
.stageFlags = m_Stage,
.offset = 0,
.size = static_cast<u32>(elementSize),
});
}
void
PipelineLayoutBuilder::AddSubObjectRange(slang::TypeLayoutReflection *layout, i64 subObjectRangeIndex)
{
auto bindingRangeIndex = layout->getSubObjectRangeBindingRangeIndex(subObjectRangeIndex);
switch (layout->getBindingRangeType(bindingRangeIndex))
{
case slang::BindingType::ParameterBlock: {
auto const parameterBlockTypeLayout = layout->getBindingRangeLeafTypeLayout(bindingRangeIndex);
AddDescriptorSetForParameterBlock(parameterBlockTypeLayout);
}
break;
case slang::BindingType::PushConstant: {
auto const constantBufferTypeLayout = layout->getBindingRangeLeafTypeLayout(bindingRangeIndex);
AddPushConstantRangeForConstantBuffer(constantBufferTypeLayout);
}
break;
default:
UNREACHABLE("Unexpected types");
}
}
vk::DescriptorType
BindingTypeToDescriptorType(slang::BindingType binding)
{
using vk::DescriptorType;
switch (binding)
{
case slang::BindingType::Sampler:
return DescriptorType::eSampler;
case slang::BindingType::Texture:
return DescriptorType::eSampledImage;
case slang::BindingType::ConstantBuffer:
return DescriptorType::eUniformBuffer;
case slang::BindingType::TypedBuffer:
return DescriptorType::eStorageBuffer;
case slang::BindingType::RawBuffer:
return DescriptorType::eStorageBuffer;
case slang::BindingType::CombinedTextureSampler:
return DescriptorType::eCombinedImageSampler;
case slang::BindingType::InlineUniformData:
return DescriptorType::eInlineUniformBlock;
case slang::BindingType::RayTracingAccelerationStructure:
return DescriptorType::eAccelerationStructureKHR;
case slang::BindingType::MutableTexture:
return DescriptorType::eStorageImage;
case slang::BindingType::MutableTypedBuffer:
return DescriptorType::eStorageBuffer;
case slang::BindingType::MutableRawBuffer:
return DescriptorType::eStorageBuffer;
default:
UNREACHABLE("Unsupported Types");
}
return {};
}
vk::ShaderStageFlags &
DescriptorLayoutBuilder::Stage() const
{
return m_PipelineLayoutBuilder->m_Stage;
}
DescriptorLayoutBuilder::DescriptorLayoutBuilder(PipelineLayoutBuilder *pipelineLayoutBuilder)
: m_PipelineLayoutBuilder{pipelineLayoutBuilder}
, m_SetIndex{static_cast<u32>(pipelineLayoutBuilder->m_DescriptorSetLayouts.size())}
{
m_PipelineLayoutBuilder->m_DescriptorSetLayouts.push_back();
}
void
DescriptorLayoutBuilder::AddDescriptorRange(slang::TypeLayoutReflection *layout, i64 const relativeSetIndex,
i64 const rangeIndex)
{
auto const bindingType = layout->getDescriptorSetDescriptorRangeType(relativeSetIndex, rangeIndex);
if (bindingType == slang::BindingType::PushConstant)
return;
u32 const descriptorCount =
static_cast<u32>(layout->getDescriptorSetDescriptorRangeDescriptorCount(relativeSetIndex, rangeIndex));
u32 const bindingIndex = static_cast<u32>(m_LayoutBindings.size());
auto const vkBindingType = BindingTypeToDescriptorType(bindingType);
m_LayoutBindings.push_back({
.binding = bindingIndex,
.descriptorType = vkBindingType,
.descriptorCount = descriptorCount,
.stageFlags = Stage(),
});
}
void
DescriptorLayoutBuilder::AddDescriptorRanges(slang::TypeLayoutReflection *layout)
{
i64 nSets = layout->getDescriptorSetCount();
for (i64 relativeSetIndex = 0; relativeSetIndex < nSets; ++relativeSetIndex)
{
i64 rangeCount = layout->getDescriptorSetDescriptorRangeCount(relativeSetIndex);
for (i64 rangeIndex = 0; rangeIndex < rangeCount; ++rangeIndex)
{
AddDescriptorRange(layout, relativeSetIndex, rangeIndex);
}
}
}
void
DescriptorLayoutBuilder::Build()
{
if (m_LayoutBindings.empty())
return;
auto const dsl = m_PipelineLayoutBuilder->CreateDescriptorSetLayout({
.bindingCount = static_cast<u32>(m_LayoutBindings.size()),
.pBindings = m_LayoutBindings.data(),
});
m_PipelineLayoutBuilder->m_DescriptorSetLayouts[m_SetIndex] = dsl;
}
void
DescriptorLayoutBuilder::AddAutomaticallyIntroducedUniformBuffer()
{
auto const vulkanBindingIndex = static_cast<u32>(m_LayoutBindings.size());
m_LayoutBindings.push_back({
.binding = vulkanBindingIndex,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eAll,
});
}
void
DescriptorLayoutBuilder::AddRanges(slang::TypeLayoutReflection *layout)
{
AddDescriptorRanges(layout);
m_PipelineLayoutBuilder->AddSubObjectRanges(layout);
}
void
DescriptorLayoutBuilder::AddRangesForParamBlockElement(slang::TypeLayoutReflection *layout)
{
if (layout->getSize() > 0)
{
AddAutomaticallyIntroducedUniformBuffer();
}
AddRanges(layout);
}
void
DescriptorLayoutBuilder::AddGlobalScopeParameters(slang::ProgramLayout *layout)
{
Stage() = vk::ShaderStageFlagBits::eAll;
AddRangesForParamBlockElement(layout->getGlobalParamsTypeLayout());
}
void
DescriptorLayoutBuilder::AddEntryPointParameters(slang::ProgramLayout *layout)
{
u64 entryPointCount = layout->getEntryPointCount();
for (u64 i = 0; i < entryPointCount; ++i)
{
auto *entryPoint = layout->getEntryPointByIndex(i);
AddEntryPointParameters(entryPoint);
}
}
void
DescriptorLayoutBuilder::AddEntryPointParameters(slang::EntryPointLayout *layout)
{
Stage() = SlangToVulkanShaderStage(layout->getStage());
AddRangesForParamBlockElement(layout->getTypeLayout());
}
void
PipelineLayoutBuilder::AddSubObjectRanges(slang::TypeLayoutReflection *layout)
{
i64 subObjectRangeCount = layout->getSubObjectRangeCount();
for (i64 subObjectRangeIndex = 0; subObjectRangeIndex < subObjectRangeCount; ++subObjectRangeIndex)
{
AddSubObjectRange(layout, subObjectRangeIndex);
}
}
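A hedged sketch of driving this reflection walk end to end; obtaining the slang::ProgramLayout (compilation and linking) is outside this diff, and the driver function name is illustrative.
vk::PipelineLayout
BuildPipelineLayout(PipelineLayoutBuilder &builder, slang::ProgramLayout *programLayout)
{
DescriptorLayoutBuilder sets{&builder}; // reserves set index 1 (index 0 holds the bindless layout)
sets.AddGlobalScopeParameters(programLayout); // stage = eAll for global params
sets.AddEntryPointParameters(programLayout); // per-entry-point stage flags
sets.Build(); // creates the set layout only if bindings were added
return builder.Build(); // filters null layouts, attaches push-constant ranges
}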

View File

@ -1,195 +0,0 @@
// =============================================
// Aster: render_resource_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/render_resource_manager.h"
#include "EASTL/array.h"
#include "core/device.h"
#define AbortIfFailed(RESULT) \
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = Cast<vk::Result>(RESULT)), "Cause: {}", _checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
#define AbortIfFailedMV(RESULT, MSG, EXTRA) \
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = Cast<vk::Result>(RESULT)), MSG " Cause: {}", EXTRA, _checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
#define AbortIfFailedM(RESULT, MSG) \
do \
{ \
auto _checkResultValue_ = Cast<vk::Result>(RESULT); \
ERROR_IF(Failed(_checkResultValue_), MSG " Cause: {}", _checkResultValue_) THEN_ABORT(_checkResultValue_); \
} while (false)
using namespace systems;
u32
GetHandleInternal(concepts::HandleType auto &handle)
{
return *Recast<u32 *>(&handle);
}
RenderResourceManager::WriteOwner::WriteOwner(const Handle<Buffer> &handle)
: uBufferHandle(handle)
{
}
RenderResourceManager::WriteOwner::WriteOwner(const Handle<Image> &handle)
: uImageHandle(handle)
{
}
RenderResourceManager::RenderResourceManager(const Device *device, u32 const maxBuffers, const u32 maxImages)
: m_BufferManager{device, maxBuffers, BUFFER_BINDING_INDEX}
, m_ImageManager{device, maxImages, IMAGE_BINDING_INDEX}
{
eastl::array poolSizes = {
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageBuffer,
.descriptorCount = maxBuffers,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = maxImages,
},
//vk::DescriptorPoolSize{
// .type = vk::DescriptorType::eStorageImage,
// .descriptorCount = storageTexturesCount,
//},
};
const vk::DescriptorPoolCreateInfo poolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
.maxSets = 1,
.poolSizeCount = Cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));
eastl::array descriptorLayoutBindings = {
vk::DescriptorSetLayoutBinding{
.binding = BUFFER_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.descriptorCount = Cast<u32>(maxBuffers),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = IMAGE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = Cast<u32>(maxImages),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
//vk::DescriptorSetLayoutBinding{
// .binding = STORAGE_TEXTURE_BINDING_INDEX,
// .descriptorType = vk::DescriptorType::eStorageImage,
// .descriptorCount = Cast<u32>(storageTexturesCount),
// .stageFlags = vk::ShaderStageFlagBits::eAll,
//},
};
vk::DescriptorBindingFlags bindingFlags =
vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
eastl::array<vk::DescriptorBindingFlags, decltype(descriptorLayoutBindings)::count> layoutBindingFlags;
layoutBindingFlags.fill(bindingFlags);
vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
.bindingCount = Cast<u32>(layoutBindingFlags.size()),
.pBindingFlags = layoutBindingFlags.data(),
};
static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
.pNext = &bindingFlagsCreateInfo,
.flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
.bindingCount = Cast<u32>(descriptorLayoutBindings.size()),
.pBindings = descriptorLayoutBindings.data(),
};
AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));
// One descriptor is enough. Updating it at any time is safe. (Update until submit, data held when pending)
// https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
// https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
.descriptorPool = m_DescriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &m_SetLayout,
};
AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));
device->SetName(m_SetLayout, "Bindless Layout");
device->SetName(m_DescriptorPool, "Bindless Pool");
device->SetName(m_DescriptorSet, "Bindless Set");
}
void
systems::RenderResourceManager::Commit(concepts::HandleType auto &handle)
{
using HandleType = decltype(handle)::Type;
if constexpr (std::is_same_v<HandleType, Buffer>)
{
const Buffer *buffer = handle.Fetch();
m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
.buffer = buffer->m_Buffer,
.offset = 0,
.range = buffer->GetSize(),
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = BUFFER_BINDING_INDEX,
.dstArrayElement = handle.GetIndex(),
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.pBufferInfo = &m_WriteInfos.back().uBufferInfo,
});
}
else if constexpr (std::is_same_v<HandleType, Image>)
{
const Image *image = handle.Fetch();
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = nullptr /* TODO Sampler */,
.imageView = image->m_View,
.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = IMAGE_BINDING_INDEX,
.dstArrayElement = handle.GetIndex(),
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eSampledImage,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
}
else
{
static_assert(false && "Type is currently unsupported");
}
m_WriteOwner.emplace_back(handle);
}
RenderResourceManager::WriteInfo::WriteInfo(const vk::DescriptorBufferInfo &info)
: uBufferInfo{info}
{
}
RenderResourceManager::WriteInfo::WriteInfo(const vk::DescriptorImageInfo &info)
: uImageInfo{info}
{
}
RenderResourceManager::WriteInfo::WriteInfo(const vk::BufferView &info)
: uBufferView{info}
{
}

File diff suppressed because it is too large

View File

@ -0,0 +1,150 @@
// =============================================
// Aster: sync_server.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/systems/sync_server.h"
#include "aster/systems/rendering_device.h"
using namespace systems::_internal;
SyncServer::Entry::Entry(RenderingDevice &device)
: m_CurrentPoint{0, 1}
, m_AttachedPool{nullptr}
{
constexpr static vk::SemaphoreTypeCreateInfo TYPE_CREATE_INFO = {
.semaphoreType = vk::SemaphoreType::eTimeline,
.initialValue = 0,
};
constexpr static vk::SemaphoreCreateInfo SEMAPHORE_CREATE_INFO = {.pNext = &TYPE_CREATE_INFO};
AbortIfFailed(device.m_Device->createSemaphore(&SEMAPHORE_CREATE_INFO, nullptr, &m_Semaphore));
}
void
SyncServer::Entry::Destroy(RenderingDevice &device)
{
if (m_Semaphore)
{
device.m_Device->destroy(Take(m_Semaphore), nullptr);
}
}
void
SyncServer::Entry::Wait(RenderingDevice &device)
{
vk::SemaphoreWaitInfo const waitInfo = {
.semaphoreCount = 1,
.pSemaphores = &m_Semaphore,
.pValues = &m_CurrentPoint.m_NextValue,
};
// This blocks.
// So `m_NextValue` is not modified while we wait for the signal.
AbortIfFailed(device.m_Device->waitSemaphores(&waitInfo, MaxValue<u64>));
// Thus, this is safe.
m_CurrentPoint.m_WaitValue = m_CurrentPoint.m_NextValue;
if (m_AttachedPool)
{
m_AttachedPool->Reset();
m_AttachedPool = nullptr;
}
}
void
SyncServer::Entry::Next()
{
m_CurrentPoint.m_WaitValue = m_CurrentPoint.m_NextValue;
++m_CurrentPoint.m_NextValue;
}
void
SyncServer::Entry::AttachPool(ContextPool *pool)
{
assert(!m_AttachedPool);
m_AttachedPool = pool;
}
systems::Receipt
SyncServer::Allocate()
{
auto &entry = AllocateEntry();
return Receipt{&entry};
}
void
SyncServer::Free(Receipt const receipt)
{
FreeEntry(GetEntry(receipt));
}
void
SyncServer::WaitOn(Receipt const receipt)
{
auto &entry = GetEntry(receipt);
entry.Wait(*m_Device);
FreeEntry(entry);
}
SyncServer::Entry &
SyncServer::AllocateEntry()
{
if (not m_FreeList.empty())
{
Entry &alloc = m_FreeList.back(); // bind the entry itself before pop_back() invalidates the slot
m_FreeList.pop_back();
return alloc;
}
return m_Allocations.emplace_back(*m_Device);
}
void
SyncServer::FreeEntry(Entry &entry)
{
entry.Next();
m_FreeList.push_back(entry);
}
SyncServer::Entry &
SyncServer::GetEntry(Receipt receipt)
{
return *static_cast<Entry *>(receipt.m_Opaque);
}
SyncServer::SyncServer(RenderingDevice &device)
: m_Device{&device}
{
}
SyncServer::~SyncServer()
{
if (m_Device && !m_Allocations.empty())
{
for (auto &entry : m_Allocations)
{
entry.Destroy(*m_Device);
}
m_Device = nullptr;
}
}
SyncServer::SyncServer(SyncServer &&other) noexcept
: m_Device{Take(other.m_Device)}
, m_Allocations{std::move(other.m_Allocations)}
, m_FreeList{Take(other.m_FreeList)}
{
}
SyncServer &
SyncServer::operator=(SyncServer &&other) noexcept
{
if (this == &other)
return *this;
m_Device = Take(other.m_Device);
m_Allocations = std::move(other.m_Allocations);
m_FreeList = Take(other.m_FreeList);
return *this;
}
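A hedged usage sketch of the receipt flow; syncServer is an assumed SyncServer instance, and the code that signals the entry's timeline semaphore at m_NextValue during submit is not shown here.
Receipt const receipt = syncServer.Allocate();
// ... submit work whose signal operation targets this receipt's semaphore ...
syncServer.WaitOn(receipt); // blocks on the timeline value, resets any attached pool, recycles the entry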

View File

@ -2,4 +2,4 @@
cmake_minimum_required(VERSION 3.13)
target_sources(aster_core PRIVATE "logger.cpp")
target_sources(aster_core PRIVATE "logger.cpp" "files.cpp")

View File

@ -0,0 +1,79 @@
// =============================================
// Aster: files.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/util/files.h"
eastl::vector<u32>
ReadFile(std::string_view fileName)
{
FILE *filePtr = fopen(fileName.data(), "rb");
if (!filePtr)
{
ERROR("Invalid read of {}", fileName) THEN_ABORT(-1);
}
eastl::vector<u32> outputVec;
eastl::array<u32, 1024> buffer{};
usize totalRead = 0;
usize readCount;
do
{
readCount = fread(buffer.data(), sizeof(u32), buffer.size(), filePtr);
auto const nextSize = totalRead + readCount;
outputVec.resize(nextSize);
memcpy(outputVec.data() + totalRead, buffer.data(), readCount * sizeof *buffer.data());
totalRead = nextSize;
} while (readCount == buffer.size());
(void)fclose(filePtr);
return outputVec;
}
eastl::vector<u8>
ReadFileBytes(std::string_view fileName, bool errorOnFail)
{
FILE *filePtr = fopen(fileName.data(), "rb");
if (!filePtr)
{
ERROR_IF(errorOnFail, "Invalid open (r) of {}. Cause: {}", fileName, errno);
return {};
}
eastl::vector<u8> outputVec;
eastl::array<u8, 4096> buffer{};
usize totalRead = 0;
usize readCount;
do
{
readCount = fread(buffer.data(), sizeof(u8), buffer.size(), filePtr);
auto const nextSize = totalRead + readCount;
outputVec.resize(nextSize);
memcpy(outputVec.data() + totalRead, buffer.data(), readCount * sizeof *buffer.data());
totalRead = nextSize;
} while (readCount == buffer.size());
(void)fclose(filePtr);
return outputVec;
}
bool
WriteFileBytes(std::string_view fileName, eastl::span<u8> const data)
{
FILE *filePtr = fopen(fileName.data(), "wb");
if (!filePtr)
{
ERROR("Invalid open (w) of {}. Cause: {}", fileName, errno);
return false;
}
usize const written = fwrite(data.data(), sizeof(u8), data.size(), filePtr);
(void)fclose(filePtr);
return written == data.size();
}
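A small usage sketch of the helpers above, under stated assumptions: BuildCache is a hypothetical producer and "cache.bin" an illustrative path.
eastl::vector<u8> blob = ReadFileBytes("cache.bin", /*errorOnFail=*/false);
if (blob.empty())
{
blob = BuildCache(); // hypothetical producer
if (!WriteFileBytes("cache.bin", {blob.data(), blob.size()}))
{
ERROR("Could not persist cache.bin");
}
}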

View File

@ -1,18 +1,18 @@
// =============================================
// Aster: logger.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "util/logger.h"
auto g_Logger = Logger();
// ReSharper disable once CppInconsistentNaming
/* Credits to Const-me */
namespace eastl
{
void
AssertionFailure(char const *af)
{
ERROR("{}", af);
}

View File

@ -1,31 +0,0 @@
#!/usr/bin/env bash
echo "Running CMake"
if grep 'NAME=NixOS' /etc/os-release
then
cmake --preset nixos
else
cmake --preset linux
fi
echo "Running Ninja"
if echo "$@" | grep -e "clean" -q
then
cmake --build build --target clean
elif echo "$@" | grep -e "rebuild" -q
then
cmake --build build --clean-first
else
cmake --build build
fi
if echo "$@" | grep -e "docs" -q
then
if echo "$@" | grep -e "-v" -q
then
doxygen
else
doxygen > /dev/null || echo "Doxygen Failed"
fi
fi

View File

@ -20,16 +20,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1738734093,
"narHash": "sha256-UEYOKfXXKU49fR7dGB05As0s2pGbLK4xDo48Qtdm7xs=",
"owner": "NixOS",
"lastModified": 1742976680,
"narHash": "sha256-Lcyi6YyR0PgN5rOrmM6mM/1MJIYhGi6rrq0+eiqvUb4=",
"owner": "kidrigger",
"repo": "nixpkgs",
"rev": "5b2753b0356d1c951d7a3ef1d086ba5a71fff43c",
"rev": "51cf54bdbd9c1a0a2f833cced82451df0d9c25bd",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"owner": "kidrigger",
"ref": "imgui-docking",
"repo": "nixpkgs",
"type": "github"
}

View File

@ -1,6 +1,6 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
nixpkgs.url = "github:kidrigger/nixpkgs/imgui-docking";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = {self, nixpkgs, flake-utils }:
@ -17,7 +17,7 @@
with pkgs;
{
devShells.default = clangStdenv.mkDerivation {
name = "BlazeEnv";
name = "Aster-Env";
nativeBuildInputs = [
@ -26,11 +26,10 @@
ccls
clang-tools
lldb
(imgui.override {IMGUI_BUILD_VULKAN_BINDING = true; IMGUI_BUILD_GLFW_BINDING=true; IMGUI_EXPERIMENTAL_DOCKING = true; })
];
buildInputs = [
glm
glfw3
eastl
@ -50,6 +49,7 @@
directx-shader-compiler
glslang
shaderc
shader-slang
];
};
}

run.sh
View File

@ -1,14 +0,0 @@
#!/usr/bin/env bash
if [ -d "build" ]; then
pushd ./build/samples/04_scenes/ > /dev/null || exit
if echo "$@" | grep -e "debug" -q
then
lldb ./scene_render
else
./scene_render
fi
popd > /dev/null || exit
else
echo "Build Aster first."
fi

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: frame.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "frame.h"
@ -17,7 +17,7 @@ Frame::Frame(const Device *device, const u32 queueFamilyIndex, const u32 frameCo
m_Device = device;
eastl::fixed_string<char, 50, false> name = "Frame ";
name += static_cast<char>('0' + frameCount);
const vk::CommandPoolCreateInfo commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueFamilyIndex,
@ -71,7 +71,7 @@ Frame::Present(const vk::Queue commandQueue, Swapchain *swapchain, const Surface
case vk::Result::eErrorOutOfDateKHR:
case vk::Result::eSuboptimalKHR:
DEBUG("Recreating Swapchain. Cause: {}", result);
swapchain->Create(*surface, size);
break; // Present failed. We do nothing. Frame is skipped.
default:
AbortIfFailedM(result, "Swapchain Present failed.");
@ -154,7 +154,7 @@ FrameManager::GetNextFrame(Swapchain *swapchain, const Surface *surface, Size2D
break; // Image acquired. Break out of loop.
case vk::Result::eErrorOutOfDateKHR:
DEBUG("Recreating Swapchain. Cause: {}", result);
swapchain->Create(*surface, size);
break; // Image acquire has failed. We move to the next frame.
default:
AbortIfFailedMV(result, "Waiting for swapchain image {} failed.", frameIndex);

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: frame.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@ -31,7 +31,7 @@ struct Frame
// Transient
u32 m_ImageIdx;
void Present(vk::Queue commandQueue, Swapchain *swapchain, const Surface *surface, Size2D size);
Frame(const Device *device, u32 queueFamilyIndex, u32 frameCount);
~Frame();

View File

@ -1,13 +1,14 @@
// =============================================
// Aster: gui.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "gui.h"
#include "aster/core/context.h"
#include "aster/core/device.h"
#include "aster/core/instance.h"
#include "aster/core/window.h"
#include "aster/systems/rendering_device.h"
#include "helpers.h"
#include <imgui_impl_glfw.h>
@@ -26,8 +27,74 @@ VulkanAssert(VkResult result)
}
void
Init(const Context *context, const Device *device, const Window *window, vk::Format attachmentFormat,
const u32 imageCount, const u32 queueFamily, const vk::Queue queue)
Init(systems::RenderingDevice &device, Window &window)
{
g_AttachmentFormat = device.m_Swapchain.m_Format;
eastl::vector<vk::DescriptorPoolSize> poolSizes = {
{vk::DescriptorType::eSampler, 1000},
{vk::DescriptorType::eCombinedImageSampler, 1000},
{vk::DescriptorType::eSampledImage, 1000},
{vk::DescriptorType::eStorageImage, 1000},
{vk::DescriptorType::eUniformTexelBuffer, 1000},
{vk::DescriptorType::eStorageTexelBuffer, 1000},
{vk::DescriptorType::eUniformBuffer, 1000},
{vk::DescriptorType::eStorageBuffer, 1000},
{vk::DescriptorType::eUniformBufferDynamic, 1000},
{vk::DescriptorType::eStorageBufferDynamic, 1000},
{vk::DescriptorType::eInputAttachment, 1000},
};
vk::DescriptorPoolCreateInfo const descriptorPoolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet,
.maxSets = 1000,
.poolSizeCount = static_cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
AbortIfFailed(device.m_Device->createDescriptorPool(&descriptorPoolCreateInfo, nullptr, &g_DescriptorPool));
IMGUI_CHECKVERSION();
CreateContext();
ImGuiIO &io = GetIO();
(void)io;
// io.ConfigFlags |= ImGuiConfigFlags_DockingEnable;
// io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable; // Viewports bad
StyleColorsDark();
ImGui_ImplGlfw_InitForVulkan(window.m_Window, true);
vk::PipelineRenderingCreateInfo renderingCreateInfo = {
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &g_AttachmentFormat,
};
// TODO: Switch this into being managed by RenderingDevice.
// m_Instance etc. should be private.
ImGui_ImplVulkan_InitInfo imguiVulkanInitInfo = {
.Instance = device.m_Instance.m_Instance,
.PhysicalDevice = device.m_Device.m_PhysicalDevice,
.Device = device.m_Device.m_Device,
.QueueFamily = device.m_PrimaryQueueFamily,
.Queue = device.m_PrimaryQueue,
.DescriptorPool = g_DescriptorPool,
.MinImageCount = static_cast<u32>(device.m_Swapchain.m_Images.size()),
.ImageCount = static_cast<u32>(device.m_Swapchain.m_Images.size()),
.PipelineCache = nullptr,
.UseDynamicRendering = true,
.PipelineRenderingCreateInfo = renderingCreateInfo,
.Allocator = nullptr,
.CheckVkResultFn = VulkanAssert,
};
ImGui_ImplVulkan_Init(&imguiVulkanInitInfo);
ImGui_ImplVulkan_CreateFontsTexture();
}
void
Init(Instance const *context, Device const *device, Window const *window, vk::Format attachmentFormat,
u32 const imageCount, u32 const queueFamily, vk::Queue const queue)
{
g_AttachmentFormat = attachmentFormat;
@@ -45,10 +112,10 @@ Init(const Context *context, const Device *device, const Window *window, vk::For
{vk::DescriptorType::eInputAttachment, 1000},
};
const vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo = {
vk::DescriptorPoolCreateInfo const descriptorPoolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet,
.maxSets = 1000,
.poolSizeCount = Cast<u32>(poolSizes.size()),
.poolSizeCount = static_cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
@@ -91,9 +158,18 @@ Init(const Context *context, const Device *device, const Window *window, vk::For
}
void
Destroy(const Device *device)
Destroy(systems::RenderingDevice const &device)
{
ImGui_ImplVulkan_Shutdown();
ImGui_ImplGlfw_Shutdown();
DestroyContext();
device.m_Device->destroy(Take(g_DescriptorPool), nullptr);
}
void
Destroy(Device const *device)
{
ImGui_ImplVulkan_Shutdown();
ImGui_ImplGlfw_Shutdown();
DestroyContext();
@@ -108,13 +184,13 @@ StartBuild()
ImGui_ImplGlfw_NewFrame();
NewFrame();
static ImGuiDockNodeFlags dockspaceFlags = ImGuiDockNodeFlags_None | ImGuiDockNodeFlags_PassthruCentralNode;
static ImGuiDockNodeFlags dockspaceFlags = ImGuiDockNodeFlags_None | ImGuiDockNodeFlags_PassthruCentralNode;
// We are using the ImGuiWindowFlags_NoDocking flag to make the parent window not dockable into,
// because it would be confusing to have two docking targets within each other.
ImGuiWindowFlags windowFlags = ImGuiWindowFlags_None | ImGuiWindowFlags_NoDocking;
const ImGuiViewport *viewport = GetMainViewport();
ImGuiViewport const *viewport = GetMainViewport();
SetNextWindowPos(viewport->WorkPos);
SetNextWindowSize(viewport->WorkSize);
// SetNextWindowViewport(viewport->ID);
@@ -130,18 +206,18 @@ StartBuild()
// all active windows docked into it will lose their parent and become undocked.
// We cannot preserve the docking relationship between an active window and an inactive docking, otherwise
// any change of dockspace/settings would lead to windows being stuck in limbo and never being visible.
PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(0.0f, 0.0f));
Begin("DockSpace Demo", nullptr, windowFlags);
PopStyleVar();
PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(0.0f, 0.0f));
Begin("DockSpace Demo", nullptr, windowFlags);
PopStyleVar();
PopStyleVar(2);
PopStyleVar(2);
// DockSpace
if (GetIO().ConfigFlags & ImGuiConfigFlags_DockingEnable)
{
const ImGuiID dockspaceId = GetID("MyDockSpace");
DockSpace(dockspaceId, ImVec2(0.0f, 0.0f), dockspaceFlags);
}
// DockSpace
if (GetIO().ConfigFlags & ImGuiConfigFlags_DockingEnable)
{
ImGuiID const dockspaceId = GetID("MyDockSpace");
DockSpace(dockspaceId, ImVec2(0.0f, 0.0f), dockspaceFlags);
}
}
void
@@ -161,7 +237,7 @@ EndBuild()
}
void
Draw(const vk::CommandBuffer commandBuffer, const vk::Extent2D extent, const vk::ImageView view)
Draw(vk::CommandBuffer const commandBuffer, vk::Extent2D const extent, vk::ImageView const view)
{
// OPTICK_EVENT();
@@ -181,7 +257,7 @@ Draw(const vk::CommandBuffer commandBuffer, const vk::Extent2D extent, const vk:
.clearValue = vk::ClearColorValue{0.0f, 0.0f, 0.0f, 1.0f},
};
const vk::RenderingInfo renderingInfo = {
vk::RenderingInfo const renderingInfo = {
.renderArea = {.extent = extent},
.layerCount = 1,
.colorAttachmentCount = 1,
@@ -200,6 +276,36 @@ Draw(const vk::CommandBuffer commandBuffer, const vk::Extent2D extent, const vk:
#endif
}
void
Draw(systems::Frame &frame, systems::GraphicsContext &context)
{
context.BeginDebugRegion("UI Pass", {0.9f, 0.9f, 1.0f, 1.0f});
vk::RenderingAttachmentInfo attachmentInfo = {
.imageView = frame.m_SwapchainImageView,
.imageLayout = vk::ImageLayout::eColorAttachmentOptimal,
.resolveMode = vk::ResolveModeFlagBits::eNone,
.loadOp = vk::AttachmentLoadOp::eLoad,
.storeOp = vk::AttachmentStoreOp::eStore,
.clearValue = vk::ClearColorValue{0.0f, 0.0f, 0.0f, 1.0f},
};
vk::RenderingInfo const renderingInfo = {
.renderArea = {.extent = frame.m_SwapchainSize},
.layerCount = 1,
.colorAttachmentCount = 1,
.pColorAttachments = &attachmentInfo,
.pDepthAttachment = nullptr,
};
context.BeginRendering(renderingInfo);
ImGui_ImplVulkan_RenderDrawData(GetDrawData(), context.GetCommandBuffer());
context.EndRendering();
context.EndDebugRegion();
}
void
PushDisable()
{


@@ -1,31 +1,41 @@
// =============================================
// Aster: gui.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/device.h"
#include <imgui.h>
struct AttachmentImage;
struct Device;
struct Context;
struct Instance;
struct Window;
struct Swapchain;
namespace systems
{
class RenderingDevice;
class GraphicsContext;
struct Frame;
}
// ReSharper disable once CppInconsistentNaming
namespace ImGui
{
void Init(const Context *context, const Device *device, const Window *window, vk::Format attachmentFormat,
void Init(systems::RenderingDevice &device, Window &window);
void Init(const Instance *context, const Device *device, const Window *window, vk::Format attachmentFormat,
u32 imageCount, u32 queueFamily, vk::Queue queue);
void Destroy(const systems::RenderingDevice &device);
void Destroy(const Device *device);
void Recreate();
void StartBuild();
void EndBuild();
void Draw(vk::CommandBuffer commandBuffer, vk::Extent2D extent, vk::ImageView view);
void Draw(systems::Frame &frame, systems::GraphicsContext &context);
void PushDisable();
void PopDisable();
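Taken together, the new overloads shrink ImGui integration to the RenderingDevice pair, while the older Instance/Device/Window signature is kept alongside for samples that still bootstrap manually. One plausible call sequence, assembled from these signatures and the GraphicsContext calls in gui.cpp (the exact ordering is an assumption; it is not shown in this diff):

ImGui::Init(device, window);     // swapchain format, queue, and image count are read off the device
while (window.Poll()) {
    ImGui::StartBuild();         // NewFrame + dockspace scaffolding
    // ... build UI ...
    ImGui::EndBuild();
    systems::Frame &frame = device.GetNextFrame();
    auto context = frame.CreateGraphicsContext();
    context.Begin();
    // ... scene passes ...
    ImGui::Draw(frame, context); // UI pass loads, rather than clears, the scene image
    context.End();
    device.Present(frame, context);
}
device.WaitIdle();
ImGui::Destroy(device);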


@@ -1,6 +1,6 @@
// =============================================
// Aster: helpers.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "helpers.h"
@@ -15,24 +15,24 @@ constexpr QueueSupportFlags REQUIRED_QUEUE_SUPPORT = QueueSupportFlags{} | Queue
QueueSupportFlagBits::eTransfer;
bool
IsSuitableDevice(const PhysicalDevice *physicalDevice)
IsSuitableDevice(PhysicalDevice const *physicalDevice)
{
const bool hasAllRequiredQueues =
std::ranges::any_of(physicalDevice->m_QueueFamilies, [](const auto &queueFamilyProp) {
bool const hasAllRequiredQueues =
std::ranges::any_of(physicalDevice->m_QueueFamilies, [](auto const &queueFamilyProp) {
return (queueFamilyProp.m_Support & REQUIRED_QUEUE_SUPPORT) == REQUIRED_QUEUE_SUPPORT;
});
const bool isNotCpu = physicalDevice->m_DeviceProperties.deviceType != vk::PhysicalDeviceType::eCpu;
bool const isNotCpu = physicalDevice->m_DeviceProperties.deviceType != vk::PhysicalDeviceType::eCpu;
const bool hasPresentMode = !physicalDevice->m_PresentModes.empty();
bool const hasPresentMode = !physicalDevice->m_PresentModes.empty();
const bool hasSurfaceFormat = !physicalDevice->m_SurfaceFormats.empty();
bool const hasSurfaceFormat = !physicalDevice->m_SurfaceFormats.empty();
return hasSurfaceFormat && hasPresentMode && isNotCpu && hasAllRequiredQueues;
}
PhysicalDevice
FindSuitableDevice(const PhysicalDevices &physicalDevices)
FindSuitableDevice(PhysicalDevices const &physicalDevices)
{
for (auto &physicalDevice : physicalDevices)
{
@@ -47,7 +47,7 @@ FindSuitableDevice(const PhysicalDevices &physicalDevices)
}
QueueAllocation
FindAppropriateQueueAllocation(const PhysicalDevice *physicalDevice)
FindAppropriateQueueAllocation(PhysicalDevice const *physicalDevice)
{
for (auto &queueFamilyInfo : physicalDevice->m_QueueFamilies)
{
@@ -62,76 +62,3 @@ FindAppropriateQueueAllocation(const PhysicalDevice *physicalDevice)
ERROR("No suitable queue family on the GPU.")
THEN_ABORT(vk::Result::eErrorUnknown);
}
eastl::vector<u32>
ReadFile(cstr fileName)
{
FILE *filePtr = fopen(fileName, "rb");
if (!filePtr)
{
ERROR("Invalid read of {}", fileName) THEN_ABORT(-1);
}
eastl::vector<u32> outputVec;
eastl::array<u32, 1024> buffer{};
usize totalRead = 0;
usize readCount;
do
{
readCount = fread(buffer.data(), sizeof(u32), buffer.size(), filePtr);
const auto nextSize = totalRead + readCount;
outputVec.resize(nextSize);
memcpy(outputVec.data() + totalRead, buffer.data(), readCount * sizeof *buffer.data());
totalRead = nextSize;
} while (readCount == buffer.size());
return outputVec;
}
eastl::vector<u8>
ReadFileBytes(cstr fileName, bool errorOnFail)
{
FILE *filePtr = fopen(fileName, "rb");
if (!filePtr)
{
ERROR_IF(errorOnFail, "Invalid open (r) of {}. Cause: {}", fileName, errno);
return {};
}
eastl::vector<u8> outputVec;
eastl::array<u8, 4096> buffer{};
usize totalRead = 0;
usize readCount;
do
{
readCount = fread(buffer.data(), sizeof(u8), buffer.size(), filePtr);
const auto nextSize = totalRead + readCount;
outputVec.resize(nextSize);
memcpy(outputVec.data() + totalRead, buffer.data(), readCount * sizeof *buffer.data());
totalRead = nextSize;
} while (readCount == buffer.size());
(void)fclose(filePtr);
return outputVec;
}
bool
WriteFileBytes(cstr fileName, eastl::span<u8> data)
{
FILE *filePtr = fopen(fileName, "wb");
if (!filePtr)
{
ERROR("Invalid open (w) of {}. Cause: {}", fileName, errno);
return false;
}
const usize written = fwrite(data.data(), sizeof(u8), data.size(), filePtr);
(void)fclose(filePtr);
return written == data.size();
}
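The deleted ReadFile/ReadFileBytes/WriteFileBytes helpers are not lost functionality: the samples below gain an include of "aster/util/files.h", which presumably now hosts them (an assumption; only the include is visible in this diff). A hypothetical call site under that assumption:

#include "aster/util/files.h"                                // assumed new home of the file helpers
eastl::vector<u8> bytes = ReadFileBytes("shader/box.slang"); // keeps the old errorOnFail = true default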


@@ -1,6 +1,6 @@
// =============================================
// Aster: helpers.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -18,9 +18,6 @@ class PhysicalDevices;
PhysicalDevice FindSuitableDevice(const PhysicalDevices &physicalDevices);
QueueAllocation FindAppropriateQueueAllocation(const PhysicalDevice *physicalDevice);
eastl::vector<u32> ReadFile(cstr fileName);
eastl::vector<u8> ReadFileBytes(cstr fileName, bool errorOnFail = true);
bool WriteFileBytes(cstr fileName, eastl::span<u8> data);
template <usize TSize>
using StackString = eastl::fixed_string<char, TSize, false>;
@@ -29,7 +26,7 @@ using StackString = eastl::fixed_string<char, TSize, false>;
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = Cast<vk::Result>(RESULT)), "Cause: {}", _checkResultValue_) \
ERROR_IF(Failed(_checkResultValue_ = static_cast<vk::Result>(RESULT)), "Cause: {}", _checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
@@ -37,13 +34,13 @@ using StackString = eastl::fixed_string<char, TSize, false>;
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = Cast<vk::Result>(RESULT)), MSG " Cause: {}", EXTRA, _checkResultValue_) \
ERROR_IF(Failed(_checkResultValue_ = static_cast<vk::Result>(RESULT)), MSG " Cause: {}", EXTRA, _checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
#define AbortIfFailedM(RESULT, MSG) \
do \
{ \
auto _checkResultValue_ = Cast<vk::Result>(RESULT); \
auto _checkResultValue_ = static_cast<vk::Result>(RESULT); \
ERROR_IF(Failed(_checkResultValue_), MSG " Cause: {}", _checkResultValue_) THEN_ABORT(_checkResultValue_); \
} while (false)
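All three macros evaluate RESULT once, coerce it to vk::Result with static_cast, log on failure, and abort; they differ only in how the message is built. Representative call sites, taken from elsewhere in this diff:

AbortIfFailed(device.m_Device.createFence(&fenceCreateInfo, nullptr, &fence)); // bare check
AbortIfFailedM(result, "Swapchain Present failed.");                           // fixed message
AbortIfFailedMV(result, "Waiting for swapchain image {} failed.", frameIndex); // one format argument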


@@ -3,8 +3,8 @@
cmake_minimum_required(VERSION 3.13)
add_executable(triangle "triangle.cpp")
add_shader(triangle "shader/triangle.vert.glsl")
add_shader(triangle "shader/triangle.frag.glsl")
add_shader(triangle "shader/triangle.slang")
add_resource_dir(triangle "shader")
target_link_libraries(triangle PRIVATE aster_core)
target_link_libraries(triangle PRIVATE util_helper)


@@ -1,9 +0,0 @@
#version 450
#pragma shader_stage(fragment)
layout (location = 0) in vec3 inColor;
layout (location = 0) out vec4 outColor;
void main() {
outColor = vec4(inColor, 1.0);
}


@@ -0,0 +1,35 @@
struct Vertex {
float3 point;
float3 color;
};
struct VSIn {
Vertex vertex;
};
struct VSOut
{
float4 Pos : SV_POSITION;
float3 Color : COLOR0;
};
[shader("vertex")]
VSOut vsmain(VSIn input) {
VSOut output;
output.Pos = float4(input.vertex.point, 1.0f);
output.Color = input.vertex.color;
return output;
}
struct FSOut {
float4 Color;
};
[shader("fragment")]
FSOut fsmain(VSOut input) {
FSOut outp;
outp.Color = float4(input.Color, 1.0);
return outp;
}
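Both stages now live in one module, marked by [shader("vertex")] and [shader("fragment")] and selected by entry-point name, which is why the per-stage GLSL and HLSL files around this one can be deleted. This is how triangle.cpp below binds the module (names taken from that diff):

Pipeline pipeline;
auto pipelineError = device.CreateGraphicsPipeline(pipeline, {
    .m_VertexInputs = {{.m_Attribute = Vertex::GetAttributes(), .m_Stride = sizeof(Vertex)}},
    .m_Shaders = {{.m_ShaderFile = "triangle.slang", .m_EntryPoints = {"vsmain", "fsmain"}}},
});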


@@ -1,27 +0,0 @@
#version 450
#pragma shader_stage(vertex)
layout(location=0) in vec4 position;
layout(location=1) in vec4 color;
layout(location=0) out vec3 outColor;
void main() {
/*
vec3 points[] = {
vec3(-0.5f, -0.5f, 0.0f),
vec3(0.5f, -0.5f, 0.0f),
vec3(0.0f, 0.5f, 0.0f)
};
vec3 colors[] = {
vec3( 1.0f, 0.0f, 0.0f ),
vec3( 0.0f, 1.0f, 0.0f ),
vec3( 0.0f, 0.0f, 1.0f ),
};
gl_Position = vec4(points[gl_VertexIndex], 1.0f);
outColor = vec3(colors[gl_VertexIndex]); //*/
//*
gl_Position = vec4(position.xyz, 1.0f);
outColor = vec3(color.rgb); //*/
}


@@ -1,28 +0,0 @@
struct VSIn {
int idx : SV_VERTEXID;
};
struct VSOut
{
float4 Pos : SV_POSITION;
[[vk::location(0)]] float3 Color : COLOR0;
};
VSOut main(VSIn input) {
float3 points[] = {
float3(-0.5f, -0.5f, 0.0f),
float3(0.5f, -0.5f, 0.0f),
float3(0.0f, 0.5f, 0.0f)
};
float3 colors[] = {
float3( 1.0f, 0.0f, 0.0f ),
float3( 0.0f, 1.0f, 0.0f ),
float3( 0.0f, 0.0f, 1.0f ),
};
VSOut output;
output.Pos = float4(points[input.idx], 1.0f);
output.Color = colors[input.idx];
return output;
}


@@ -1,115 +1,78 @@
// =============================================
// Aster: triangle.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/aster.h"
#include "aster/core/buffer.h"
#include "aster/core/constants.h"
#include "aster/core/context.h"
#include "aster/core/device.h"
#include "aster/core/instance.h"
#include "aster/core/physical_device.h"
#include "aster/core/pipeline.h"
#include "aster/core/swapchain.h"
#include "aster/core/window.h"
#include "aster/core/pipeline.h"
#include "aster/systems/rendering_device.h"
#include "aster/util/files.h"
#include "helpers.h"
#include <EASTL/array.h>
constexpr u32 MAX_FRAMES_IN_FLIGHT = 3;
constexpr auto VERTEX_SHADER_FILE = "shader/triangle.vert.glsl.spv";
constexpr auto FRAGMENT_SHADER_FILE = "shader/triangle.frag.glsl.spv";
vk::ShaderModule CreateShader(const Device *device, cstr shaderFile);
Pipeline CreatePipeline(const Device *device, const Swapchain *swapchain);
constexpr auto SHADER_MODULE = "triangle.slang";
struct Vertex
{
vec3 m_Position;
vec3 m_Color;
constexpr static vk::VertexInputBindingDescription
GetBinding(const u32 binding)
{
return {.binding = binding, .stride = sizeof(Vertex), .inputRate = vk::VertexInputRate::eVertex};
}
constexpr static eastl::array<vk::VertexInputAttributeDescription, 2>
GetAttributes(const u32 binding)
static eastl::vector<systems::AttributeInfo>
GetAttributes()
{
return {
vk::VertexInputAttributeDescription{
.location = 0,
.binding = binding,
.format = vk::Format::eR32G32B32Sfloat,
.offset = offsetof(Vertex, m_Position),
{
.m_Location = 0,
.m_Offset = offsetof(Vertex, m_Position),
.m_Format = systems::AttributeInfo::Format::eFloat32X3,
},
vk::VertexInputAttributeDescription{
.location = 1,
.binding = binding,
.format = vk::Format::eR32G32B32Sfloat,
.offset = offsetof(Vertex, m_Color),
{
.m_Location = 1,
.m_Offset = offsetof(Vertex, m_Color),
.m_Format = systems::AttributeInfo::Format::eFloat32X3,
},
};
}
};
struct Frame
{
const Device *m_Device;
vk::CommandPool m_Pool;
vk::CommandBuffer m_CommandBuffer;
vk::Fence m_FrameAvailableFence;
vk::Semaphore m_ImageAcquireSem;
vk::Semaphore m_RenderFinishSem;
Frame(const Device *device, u32 queueFamilyIndex, u32 frameCount);
~Frame();
};
int
main(int, char **)
{
MIN_LOG_LEVEL(Logger::LogType::eInfo);
Window window = {"Triangle (Aster)", {640, 480}};
Context context = {"Triangle", VERSION};
Surface surface = {&context, &window, "Primary"};
systems::RenderingDevice device{{
.m_Window = window,
.m_Features = {.m_Vulkan12Features = {.bufferDeviceAddress = true},
.m_Vulkan13Features = {.synchronization2 = true, .dynamicRendering = true}},
.m_AppName = "Triangle",
.m_ShaderSearchPaths = {"shader/"},
.m_UseBindless = false,
.m_Name = "Primary",
}};
PhysicalDevices physicalDevices = {&surface, &context};
PhysicalDevice deviceToUse = FindSuitableDevice(physicalDevices);
INFO("Using {} as the primary device.", deviceToUse.m_DeviceProperties.deviceName.data());
Features enabledDeviceFeatures = {
.m_Vulkan12Features = {.bufferDeviceAddress = true},
.m_Vulkan13Features = {.synchronization2 = true, .dynamicRendering = true},
};
QueueAllocation queueAllocation = FindAppropriateQueueAllocation(&deviceToUse);
Device device = {&context, &deviceToUse, &enabledDeviceFeatures, {queueAllocation}, "Primary Device"};
vk::Queue commandQueue = device.GetQueue(queueAllocation.m_Family, 0);
Swapchain swapchain = {&surface, &device, window.GetSize(), "Primary Chain"};
Pipeline pipeline = CreatePipeline(&device, &swapchain);
vk::CommandPool copyPool;
vk::CommandBuffer copyBuffer;
{
vk::CommandPoolCreateInfo poolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueAllocation.m_Family,
};
auto result = device.m_Device.createCommandPool(&poolCreateInfo, nullptr, &copyPool);
ERROR_IF(Failed(result), "Copy command pool creation failed. Cause: {}", result) THEN_ABORT(result);
vk::CommandBufferAllocateInfo bufferAllocateInfo = {
.commandPool = copyPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
result = device.m_Device.allocateCommandBuffers(&bufferAllocateInfo, &copyBuffer);
ERROR_IF(Failed(result), "Copy command buffer allocation failed. Cause: {}", result) THEN_ABORT(result);
}
Pipeline pipeline;
auto pipelineError = device.CreateGraphicsPipeline(pipeline, {
.m_VertexInputs = {{
.m_Attribute = Vertex::GetAttributes(),
.m_Stride = sizeof(Vertex),
}},
.m_Shaders = {{
.m_ShaderFile = SHADER_MODULE,
.m_EntryPoints = {"vsmain", "fsmain"},
}},
});
ERROR_IF(pipelineError, "Error creating pipeline. Cause: {}", pipelineError.What());
// eastl::array<Vertex, 3> vertices{};
eastl::array vertices = {
@@ -117,60 +80,10 @@ main(int, char **)
Vertex{.m_Position = {0.5f, -0.5f, 0.0f}, .m_Color = {0.0f, 1.0f, 0.0f}},
Vertex{.m_Position = {0.0f, 0.5f, 0.0f}, .m_Color = {0.0f, 0.0f, 1.0f}},
};
VertexBuffer vbo;
vbo.Init(&device, vertices.size() * sizeof vertices[0], "VBO");
{
StagingBuffer staging;
staging.Init(&device, vertices.size() * sizeof vertices[0], "Staging");
staging.Write(&device, 0, vertices.size() * sizeof vertices[0], vertices.data());
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
auto result = device.m_Device.createFence(&fenceCreateInfo, nullptr, &fence);
ERROR_IF(Failed(result), "Fence creation failed. Cause: {}", result) THEN_ABORT(result);
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
result = copyBuffer.begin(&beginInfo);
ERROR_IF(Failed(result), "Copy begin failed. Cause: {}", result) THEN_ABORT(result);
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = staging.GetSize()};
copyBuffer.copyBuffer(staging.m_Buffer, vbo.m_Buffer, 1, &bufferCopy);
result = copyBuffer.end();
ERROR_IF(Failed(result), "Copy end failed. Cause: {}", result) THEN_ABORT(result);
vk::SubmitInfo submitInfo = {
.commandBufferCount = 1,
.pCommandBuffers = &copyBuffer,
};
result = commandQueue.submit(1, &submitInfo, fence);
ERROR_IF(Failed(result), "Submit failed. Cause: {}", result) THEN_ABORT(result) ELSE_INFO("Submit copy");
result = device.m_Device.waitForFences(1, &fence, true, MaxValue<u64>);
ERROR_IF(Failed(result), "Fence wait failed. Cause: {}", result) THEN_ABORT(result) ELSE_INFO("Fence wait");
result = device.m_Device.resetCommandPool(copyPool, {});
ERROR_IF(Failed(result), "Couldn't reset command pool. Cause: {}", result) THEN_ABORT(result);
device.m_Device.destroy(fence, nullptr);
staging.Destroy(&device);
}
auto vbo = device.CreateVertexBuffer(vertices.size() * sizeof vertices[0], "VBO");
vbo->Write(0, vertices.size() * sizeof vertices[0], vertices.data());
// Persistent variables
vk::Viewport viewport = {
.x = 0,
.y = Cast<f32>(swapchain.m_Extent.height),
.width = Cast<f32>(swapchain.m_Extent.width),
.height = -Cast<f32>(swapchain.m_Extent.height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = swapchain.m_Extent,
};
vk::ImageSubresourceRange subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -186,8 +99,8 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eColorAttachmentOptimal,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo topOfThePipeDependency = {
@@ -201,8 +114,8 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eNone,
.oldLayout = vk::ImageLayout::eColorAttachmentOptimal,
.newLayout = vk::ImageLayout::ePresentSrcKHR,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo renderToPresentDependency = {
@@ -210,70 +123,39 @@ main(int, char **)
.pImageMemoryBarriers = &renderToPresentBarrier,
};
// Frames
eastl::fixed_vector<Frame, MAX_FRAMES_IN_FLIGHT> frames;
for (u32 i = 0; i < MAX_FRAMES_IN_FLIGHT; ++i)
{
frames.emplace_back(&device, queueAllocation.m_Family, i);
}
INFO("Starting loop");
u32 frameIndex = 0;
while (window.Poll())
{
Frame *currentFrame = &frames[frameIndex];
systems::Frame &currentFrame = device.GetNextFrame();
auto result = device.m_Device.waitForFences(1, &currentFrame->m_FrameAvailableFence, true, MaxValue<u64>);
ERROR_IF(Failed(result), "Waiting for fence {} failed. Cause: {}", frameIndex, result)
THEN_ABORT(result);
Size2D swapchainSize = currentFrame.m_SwapchainSize;
u32 imageIndex;
result = device.m_Device.acquireNextImageKHR(swapchain.m_Swapchain, MaxValue<u64>,
currentFrame->m_ImageAcquireSem, nullptr, &imageIndex);
if (Failed(result))
{
switch (result)
{
case vk::Result::eErrorOutOfDateKHR:
case vk::Result::eSuboptimalKHR:
INFO("Recreating Swapchain. Cause: {}", result);
swapchain.Create(&surface, window.GetSize());
viewport.y = Cast<f32>(swapchain.m_Extent.height);
viewport.width = Cast<f32>(swapchain.m_Extent.width);
viewport.height = -Cast<f32>(swapchain.m_Extent.height);
scissor.extent = swapchain.m_Extent;
continue; // Image acquire has failed. We move to the next frame.
default:
ERROR("Waiting for swapchain image {} failed. Cause: {}", frameIndex, result)
THEN_ABORT(result);
}
}
// Reset fences here. In case swapchain was out of date, we leave the fences signalled.
result = device.m_Device.resetFences(1, &currentFrame->m_FrameAvailableFence);
ERROR_IF(Failed(result), "Fence {} reset failed. Cause: {}", frameIndex, result)
THEN_ABORT(result);
vk::Viewport viewport = {
.x = 0,
.y = static_cast<f32>(swapchainSize.m_Height),
.width = static_cast<f32>(swapchainSize.m_Width),
.height = -static_cast<f32>(swapchainSize.m_Height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
result = device.m_Device.resetCommandPool(currentFrame->m_Pool, {});
ERROR_IF(Failed(result), "Command pool {} reset failed. Cause: {}", frameIndex, result)
THEN_ABORT(result);
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = static_cast<vk::Extent2D>(swapchainSize),
};
vk::ImageView currentImageView = swapchain.m_ImageViews[imageIndex];
vk::Image currentImage = swapchain.m_Images[imageIndex];
vk::CommandBuffer cmd = currentFrame->m_CommandBuffer;
auto context = currentFrame.CreateGraphicsContext();
topOfThePipeBarrier.image = currentImage;
renderToPresentBarrier.image = currentImage;
topOfThePipeBarrier.image = currentFrame.m_SwapchainImage;
renderToPresentBarrier.image = currentFrame.m_SwapchainImage;
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
result = cmd.begin(&beginInfo);
ERROR_IF(Failed(result), "Command buffer begin failed. Cause: {}", result)
THEN_ABORT(result);
context.Begin();
cmd.pipelineBarrier2(&topOfThePipeDependency);
context.Dependency(topOfThePipeDependency);
// Render
vk::RenderingAttachmentInfo attachmentInfo = {
.imageView = currentImageView,
.imageView = currentFrame.m_SwapchainImageView,
.imageLayout = vk::ImageLayout::eColorAttachmentOptimal,
.resolveMode = vk::ResolveModeFlagBits::eNone,
.loadOp = vk::AttachmentLoadOp::eClear,
@@ -282,265 +164,29 @@ main(int, char **)
};
vk::RenderingInfo renderingInfo = {
.renderArea = {.extent = swapchain.m_Extent},
.renderArea = scissor,
.layerCount = 1,
.colorAttachmentCount = 1,
.pColorAttachments = &attachmentInfo,
};
cmd.beginRendering(&renderingInfo);
context.BeginRendering(renderingInfo);
cmd.setViewport(0, 1, &viewport);
cmd.setScissor(0, 1, &scissor);
cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
usize offsets = 0;
cmd.bindVertexBuffers(0, 1, &vbo.m_Buffer, &offsets);
cmd.draw(3, 1, 0, 0);
context.SetViewport(viewport);
context.BindPipeline(pipeline);
context.BindVertexBuffer(vbo);
context.Draw(3);
cmd.endRendering();
context.EndRendering();
cmd.pipelineBarrier2(&renderToPresentDependency);
context.Dependency(renderToPresentDependency);
result = cmd.end();
ERROR_IF(Failed(result), "Command buffer end failed. Cause: {}", result)
THEN_ABORT(result);
context.End();
vk::PipelineStageFlags waitDstStage = vk::PipelineStageFlagBits::eColorAttachmentOutput;
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame->m_ImageAcquireSem,
.pWaitDstStageMask = &waitDstStage,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
.signalSemaphoreCount = 1,
.pSignalSemaphores = &currentFrame->m_RenderFinishSem,
};
result = commandQueue.submit(1, &submitInfo, currentFrame->m_FrameAvailableFence);
ERROR_IF(Failed(result), "Command queue submit failed. Cause: {}", result)
THEN_ABORT(result);
vk::PresentInfoKHR presentInfo = {
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame->m_RenderFinishSem,
.swapchainCount = 1,
.pSwapchains = &swapchain.m_Swapchain,
.pImageIndices = &imageIndex,
.pResults = nullptr,
};
result = commandQueue.presentKHR(&presentInfo);
if (Failed(result))
{
switch (result)
{
case vk::Result::eErrorOutOfDateKHR:
case vk::Result::eSuboptimalKHR:
INFO("Recreating Swapchain. Cause: {}", result);
swapchain.Create(&surface, window.GetSize());
viewport.y = Cast<f32>(swapchain.m_Extent.height);
viewport.width = Cast<f32>(swapchain.m_Extent.width);
viewport.height = -Cast<f32>(swapchain.m_Extent.height);
scissor.extent = swapchain.m_Extent;
break; // Present failed. We redo the frame.
default:
ERROR("Command queue present failed. Cause: {}", result)
THEN_ABORT(result);
}
}
frameIndex = (frameIndex + 1) % MAX_FRAMES_IN_FLIGHT;
device.Present(currentFrame, context);
}
device.WaitIdle();
device.m_Device.destroy(copyPool, nullptr);
vbo.Destroy(&device);
return 0;
}
Frame::Frame(const Device *device, const u32 queueFamilyIndex, const u32 frameCount)
{
m_Device = device;
const vk::CommandPoolCreateInfo commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueFamilyIndex,
};
vk::Result result = device->m_Device.createCommandPool(&commandPoolCreateInfo, nullptr, &m_Pool);
ERROR_IF(Failed(result), "Could not command pool for frame {}. Cause: {}", frameCount, result)
THEN_ABORT(result);
constexpr vk::FenceCreateInfo fenceCreateInfo = {.flags = vk::FenceCreateFlagBits::eSignaled};
result = device->m_Device.createFence(&fenceCreateInfo, nullptr, &m_FrameAvailableFence);
ERROR_IF(Failed(result), "Could not create a fence for frame {}. Cause: {}", frameCount, result)
THEN_ABORT(result);
constexpr vk::SemaphoreCreateInfo semaphoreCreateInfo = {};
result = device->m_Device.createSemaphore(&semaphoreCreateInfo, nullptr, &m_ImageAcquireSem);
ERROR_IF(Failed(result), "Could not create IA semaphore for frame {}. Cause: {}", frameCount, result)
THEN_ABORT(result);
result = device->m_Device.createSemaphore(&semaphoreCreateInfo, nullptr, &m_RenderFinishSem);
ERROR_IF(Failed(result), "Could not create RF semaphore for frame {}. Cause: {}", frameCount, result)
THEN_ABORT(result);
const vk::CommandBufferAllocateInfo allocateInfo = {
.commandPool = m_Pool, .level = vk::CommandBufferLevel::ePrimary, .commandBufferCount = 1};
result = m_Device->m_Device.allocateCommandBuffers(&allocateInfo, &m_CommandBuffer);
ERROR_IF(Failed(result), "Command buffer allocation failed. Cause: {}", result)
THEN_ABORT(result);
DEBUG("Frame {} created successfully.", frameCount);
}
Pipeline
CreatePipeline(const Device *device, const Swapchain *swapchain)
{
// Pipeline Setup
auto vertexShaderModule = CreateShader(device, VERTEX_SHADER_FILE);
auto fragmentShaderModule = CreateShader(device, FRAGMENT_SHADER_FILE);
eastl::array<vk::PipelineShaderStageCreateInfo, 2> shaderStages = {{
{
.stage = vk::ShaderStageFlagBits::eVertex,
.module = vertexShaderModule,
.pName = "main",
},
{
.stage = vk::ShaderStageFlagBits::eFragment,
.module = fragmentShaderModule,
.pName = "main",
},
}};
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
.setLayoutCount = 0,
.pSetLayouts = nullptr,
.pushConstantRangeCount = 0,
.pPushConstantRanges = nullptr,
};
vk::PipelineLayout pipelineLayout;
vk::Result result = device->m_Device.createPipelineLayout(&pipelineLayoutCreateInfo, nullptr, &pipelineLayout);
ERROR_IF(Failed(result), "Could not create a pipeline layout. Cause: {}", result) THEN_ABORT(result);
device->SetName(pipelineLayout, "Triangle Layout");
vk::VertexInputBindingDescription inputBindingDescription = Vertex::GetBinding(0);
auto inputAttributeDescription = Vertex::GetAttributes(0);
vk::PipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {
.vertexBindingDescriptionCount = 1,
.pVertexBindingDescriptions = &inputBindingDescription,
.vertexAttributeDescriptionCount = Cast<u32>(inputAttributeDescription.size()),
.pVertexAttributeDescriptions = inputAttributeDescription.data(),
};
vk::PipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
.topology = vk::PrimitiveTopology::eTriangleList,
.primitiveRestartEnable = false,
};
vk::PipelineViewportStateCreateInfo viewportStateCreateInfo = {
.viewportCount = 1,
.scissorCount = 1,
};
vk::PipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
.depthClampEnable = false,
.rasterizerDiscardEnable = false,
.polygonMode = vk::PolygonMode::eFill,
.cullMode = vk::CullModeFlagBits::eNone,
.frontFace = vk::FrontFace::eCounterClockwise,
.depthBiasEnable = false,
.lineWidth = 1.0,
};
vk::PipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
.rasterizationSamples = vk::SampleCountFlagBits::e1,
.sampleShadingEnable = false,
};
vk::PipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo = {
.depthTestEnable = false,
.depthWriteEnable = false,
};
vk::PipelineColorBlendAttachmentState colorBlendAttachmentState = {
.blendEnable = false,
.srcColorBlendFactor = vk::BlendFactor::eSrcColor,
.dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcColor,
.colorBlendOp = vk::BlendOp::eAdd,
.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha,
.dstAlphaBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha,
.alphaBlendOp = vk::BlendOp::eAdd,
.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA,
};
vk::PipelineColorBlendStateCreateInfo colorBlendStateCreateInfo = {
.logicOpEnable = false,
.attachmentCount = 1,
.pAttachments = &colorBlendAttachmentState,
};
eastl::array dynamicStates = {
vk::DynamicState::eScissor,
vk::DynamicState::eViewport,
};
vk::PipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
.dynamicStateCount = Cast<u32>(dynamicStates.size()),
.pDynamicStates = dynamicStates.data(),
};
vk::PipelineRenderingCreateInfo renderingCreateInfo = {
.viewMask = 0,
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &swapchain->m_Format,
};
vk::GraphicsPipelineCreateInfo pipelineCreateInfo = {
.pNext = &renderingCreateInfo,
.stageCount = Cast<u32>(shaderStages.size()),
.pStages = shaderStages.data(),
.pVertexInputState = &vertexInputStateCreateInfo,
.pInputAssemblyState = &inputAssemblyStateCreateInfo,
.pViewportState = &viewportStateCreateInfo,
.pRasterizationState = &rasterizationStateCreateInfo,
.pMultisampleState = &multisampleStateCreateInfo,
.pDepthStencilState = &depthStencilStateCreateInfo,
.pColorBlendState = &colorBlendStateCreateInfo,
.pDynamicState = &dynamicStateCreateInfo,
.layout = pipelineLayout,
};
vk::Pipeline pipeline;
result = device->m_Device.createGraphicsPipelines(nullptr, 1, &pipelineCreateInfo, nullptr, &pipeline);
ERROR_IF(Failed(result), "Could not create a graphics pipeline. Cause: {}", result)
THEN_ABORT(result);
device->SetName(pipeline, "Triangle Pipeline");
device->m_Device.destroy(vertexShaderModule, nullptr);
device->m_Device.destroy(fragmentShaderModule, nullptr);
return {device, pipelineLayout, pipeline, {}};
}
vk::ShaderModule
CreateShader(const Device *device, cstr shaderFile)
{
eastl::vector<u32> shaderCode = ReadFile(shaderFile);
const vk::ShaderModuleCreateInfo shaderModuleCreateInfo = {
.codeSize = shaderCode.size() * sizeof(u32),
.pCode = shaderCode.data(),
};
vk::ShaderModule shaderModule;
vk::Result result = device->m_Device.createShaderModule(&shaderModuleCreateInfo, nullptr, &shaderModule);
ERROR_IF(Failed(result), "Shader {} could not be created. Cause: {}", shaderFile, result)
THEN_ABORT(result);
return shaderModule;
}
Frame::~Frame()
{
m_Device->m_Device.destroy(m_RenderFinishSem, nullptr);
m_Device->m_Device.destroy(m_ImageAcquireSem, nullptr);
m_Device->m_Device.destroy(m_FrameAvailableFence, nullptr);
m_Device->m_Device.destroy(m_Pool, nullptr);
DEBUG("Destoryed Frame");
}
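Read end to end, the replacement loop is compact enough to restate. This is a condensation of the new lines above, not additional code; judging by what was deleted, image acquire, fence bookkeeping, queue submit, and presentation now live behind GetNextFrame and Present:

while (window.Poll()) {
    systems::Frame &currentFrame = device.GetNextFrame();
    auto context = currentFrame.CreateGraphicsContext();
    topOfThePipeBarrier.image = currentFrame.m_SwapchainImage;
    renderToPresentBarrier.image = currentFrame.m_SwapchainImage;
    context.Begin();
    context.Dependency(topOfThePipeDependency);    // Undefined -> ColorAttachmentOptimal
    context.BeginRendering(renderingInfo);
    context.SetViewport(viewport);
    context.BindPipeline(pipeline);
    context.BindVertexBuffer(vbo);
    context.Draw(3);
    context.EndRendering();
    context.Dependency(renderToPresentDependency); // ColorAttachmentOptimal -> PresentSrcKHR
    context.End();
    device.Present(currentFrame, context);
}
device.WaitIdle();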


@@ -5,10 +5,8 @@ cmake_minimum_required(VERSION 3.13)
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined -fsanitize=address")
add_executable(box "box.cpp" "stb_image.h")
add_shader(box "shader/box.vert.glsl")
add_shader(box "shader/box.frag.glsl")
add_shader(box "shader/box.vs.hlsl")
add_shader(box "shader/box.ps.hlsl")
add_shader(box "shader/box.slang")
add_resource_dir(box "shader/")
target_link_libraries(box PRIVATE aster_core)
target_link_libraries(box PRIVATE util_helper)


@@ -1,33 +1,29 @@
// =============================================
// Aster: box.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/aster.h"
#include "aster/core/buffer.h"
#include "aster/core/constants.h"
#include "aster/core/context.h"
#include "aster/core/device.h"
#include "aster/core/image.h"
#include "aster/core/physical_device.h"
#include "aster/core/pipeline.h"
#include "aster/core/swapchain.h"
#include "aster/core/window.h"
#include "helpers.h"
#define STB_IMAGE_IMPLEMENTATION
#include "aster/systems/buffer_manager.h"
#include "aster/systems/image_manager.h"
#include "frame.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
#include "aster/util/files.h"
#include "stb_image.h"
#include <EASTL/array.h>
constexpr u32 MAX_FRAMES_IN_FLIGHT = 3;
constexpr auto VERTEX_SHADER_FILE = "shader/box.vs.hlsl.spv";
constexpr auto FRAGMENT_SHADER_FILE = "shader/box.ps.hlsl.spv";
constexpr auto SHADER_FILE = "box";
struct ImageFile
{
@@ -38,6 +34,12 @@ struct ImageFile
bool Load(cstr fileName);
[[nodiscard]] usize GetSize() const;
operator eastl::span<u8>() const
{
return {static_cast<u8 *>(m_Data), GetSize()};
}
~ImageFile();
};
@@ -63,7 +65,7 @@
usize
ImageFile::GetSize() const
{
return Cast<usize>(m_Width) * m_Height * m_NumChannels;
return static_cast<usize>(m_Width) * m_Height * m_NumChannels;
}
ImageFile::~ImageFile()
@@ -72,9 +74,6 @@ ImageFile::~ImageFile()
m_Data = nullptr;
}
vk::ShaderModule CreateShader(const Device *device, cstr shaderFile);
Pipeline CreatePipeline(const Device *device, const Swapchain *swapchain);
struct Vertex
{
vec3 m_Position;
@@ -93,86 +92,55 @@ struct Camera
int
main(int, char **)
{
MIN_LOG_LEVEL(Logger::LogType::eInfo);
MIN_LOG_LEVEL(Logger::LogType::eDebug);
Window window = {"Box (Aster)", {640, 480}};
Context context = {"Box", VERSION};
Surface surface = {&context, &window, "Primary"};
PhysicalDevices physicalDevices = {&surface, &context};
PhysicalDevice deviceToUse = FindSuitableDevice(physicalDevices);
INFO("Using {} as the primary device.", deviceToUse.m_DeviceProperties.deviceName.data());
Features enabledDeviceFeatures = {
.m_Vulkan10Features = {.samplerAnisotropy = true},
.m_Vulkan12Features = {.bufferDeviceAddress = true},
.m_Vulkan12Features =
{
.descriptorIndexing = true,
.shaderSampledImageArrayNonUniformIndexing = true,
.shaderStorageBufferArrayNonUniformIndexing = true,
.shaderStorageImageArrayNonUniformIndexing = true,
.descriptorBindingUniformBufferUpdateAfterBind = true, // Not related to Bindless
.descriptorBindingSampledImageUpdateAfterBind = true,
.descriptorBindingStorageImageUpdateAfterBind = true,
.descriptorBindingStorageBufferUpdateAfterBind = true,
.descriptorBindingPartiallyBound = true,
.runtimeDescriptorArray = true,
.timelineSemaphore = true,
.bufferDeviceAddress = true,
.bufferDeviceAddressCaptureReplay = true,
},
.m_Vulkan13Features = {.synchronization2 = true, .dynamicRendering = true},
};
QueueAllocation queueAllocation = FindAppropriateQueueAllocation(&deviceToUse);
Device device = {&context, &deviceToUse, &enabledDeviceFeatures, {queueAllocation}, "Primary Device"};
vk::Queue commandQueue = device.GetQueue(queueAllocation.m_Family, 0);
Swapchain swapchain = {&surface, &device, window.GetSize(), "Primary Chain"};
Pipeline pipeline = CreatePipeline(&device, &swapchain);
systems::BufferManager bufferManager{&device, 12, 0};
systems::ImageManager imageManager{&device, 12, 1};
systems::RenderingDevice device{{
.m_Window = window,
.m_Features = enabledDeviceFeatures,
.m_AppName = "Box",
.m_AppVersion = VERSION,
.m_ShaderSearchPaths = {"shader/"},
}};
Pipeline pipeline;
auto pipelineResult =
device.CreateGraphicsPipeline(pipeline, {.m_Shaders = {
{.m_ShaderFile = SHADER_FILE, .m_EntryPoints = {"vsmain", "fsmain"}},
}});
ERROR_IF(pipelineResult, "Could not create pipeline. Cause: {}", pipelineResult.What())
THEN_ABORT(pipelineResult.Value());
auto swapchainSize = device.GetSwapchainSize();
Camera camera = {
.m_Model = {1.0f},
.m_View = glm::lookAt(vec3(0.0f, 2.0f, 2.0f), vec3(0.0f), vec3(0.0f, 1.0f, 0.0f)),
.m_View = lookAt(vec3(0.0f, 2.0f, 2.0f), vec3(0.0f), vec3(0.0f, 1.0f, 0.0f)),
.m_Perspective = glm::perspective(
70_deg, Cast<f32>(swapchain.m_Extent.width) / Cast<f32>(swapchain.m_Extent.height), 0.1f, 100.0f),
70_deg, static_cast<f32>(swapchainSize.m_Width) / static_cast<f32>(swapchainSize.m_Height), 0.1f, 100.0f),
};
vk::DescriptorPool descriptorPool;
vk::DescriptorSet descriptorSet;
{
vk::DescriptorSetLayout descriptorSetLayout = pipeline.m_SetLayouts.front();
eastl::array poolSizes = {
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 1,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = 1,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageBuffer,
.descriptorCount = 1,
},
};
vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo = {
.maxSets = 1, .poolSizeCount = Cast<u32>(poolSizes.size()), .pPoolSizes = poolSizes.data()};
AbortIfFailed(device.m_Device.createDescriptorPool(&descriptorPoolCreateInfo, nullptr, &descriptorPool));
vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
.descriptorPool = descriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &descriptorSetLayout,
};
AbortIfFailed(device.m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &descriptorSet));
}
vk::CommandPool copyPool;
vk::CommandBuffer copyBuffer;
{
vk::CommandPoolCreateInfo poolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueAllocation.m_Family,
};
AbortIfFailedM(device.m_Device.createCommandPool(&poolCreateInfo, nullptr, &copyPool),
"Copy command pool creation failed.");
vk::CommandBufferAllocateInfo bufferAllocateInfo = {
.commandPool = copyPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailedM(device.m_Device.allocateCommandBuffers(&bufferAllocateInfo, &copyBuffer),
"Copy command buffer allocation failed.");
}
eastl::array vertices = {
Vertex{.m_Position = vec3(0.5f, 0.5f, -0.5f), .m_TexCoord0 = vec2(1.0f, 1.0f)},
Vertex{.m_Position = vec3(0.5f, -0.5f, -0.5f), .m_TexCoord0 = vec2(1.0f, 0.0f)},
@@ -222,21 +190,19 @@ main(int, char **)
assert(loaded);
INFO("Image {}x{} : {} channels", imageFile.m_Width, imageFile.m_Height, imageFile.m_NumChannels);
auto vbo = bufferManager.CreateStorageBuffer(vertices.size() * sizeof vertices[0], "Vertex Buffer").ToPointer();
auto crate = imageManager
.CreateTexture2D({
.m_Format = vk::Format::eR8G8B8A8Srgb,
.m_Extent = {imageFile.m_Width, imageFile.m_Height},
.m_Name = "Crate Texture",
})
.ToPointer();
vbo->Write(&device, 0, vertices.size() * sizeof vertices[0], vertices.data());
auto vbo = device.CreateStorageBuffer(vertices.size() * sizeof vertices[0], "Vertex Buffer");
vbo->Write(0, vertices.size() * sizeof vertices[0], vertices.data());
auto crate = device.CreateTexture2DWithView({
.m_Format = vk::Format::eR8G8B8A8Srgb,
.m_Extent = {imageFile.m_Width, imageFile.m_Height},
.m_Name = "Crate Texture",
});
{
StagingBuffer imageStaging;
imageStaging.Init(&device, imageFile.GetSize(), "Image Staging");
imageStaging.Write(&device, 0, imageFile.GetSize(), imageFile.m_Data);
auto imageStaging = device.CreateStagingBuffer(imageFile.GetSize(), "Image Staging");
imageStaging->Write(0, imageFile.GetSize(), imageFile.m_Data);
vk::ImageMemoryBarrier2 imageReadyToWrite = {
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
@@ -245,9 +211,9 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.image = crate->m_Image,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.image = crate->GetImage(),
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -269,9 +235,9 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.image = crate->m_Image,
.srcQueueFamilyIndex = device.m_TransferQueueFamily,
.dstQueueFamilyIndex = device.m_PrimaryQueueFamily,
.image = crate->GetImage(),
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -286,140 +252,25 @@ main(int, char **)
.pImageMemoryBarriers = &imageReadyToRead,
};
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(device.m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
auto context = device.CreateTransferContext();
context.Begin();
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
AbortIfFailed(copyBuffer.begin(&beginInfo));
context.Dependency(imageReadyToWriteDependency);
copyBuffer.pipelineBarrier2(&imageReadyToWriteDependency);
context.UploadTexture(crate->m_Image, imageFile);
vk::BufferImageCopy imageCopy = {
.bufferOffset = 0,
.bufferRowLength = imageFile.m_Width,
.bufferImageHeight = imageFile.m_Height,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = {imageFile.m_Width, imageFile.m_Height, 1},
};
copyBuffer.copyBufferToImage(imageStaging.m_Buffer, crate->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
&imageCopy);
context.Dependency(imageReadyToReadDependency);
copyBuffer.pipelineBarrier2(&imageReadyToReadDependency);
context.End();
AbortIfFailed(copyBuffer.end());
vk::SubmitInfo submitInfo = {
.commandBufferCount = 1,
.pCommandBuffers = &copyBuffer,
};
AbortIfFailed(commandQueue.submit(1, &submitInfo, fence));
INFO("Submit copy");
AbortIfFailed(device.m_Device.waitForFences(1, &fence, true, MaxValue<u64>));
INFO("Fence wait");
AbortIfFailedM(device.m_Device.resetCommandPool(copyPool, {}), "Couldn't reset command pool.");
device.m_Device.destroy(fence, nullptr);
imageStaging.Destroy(&device);
auto recpt = device.Submit(context);
device.WaitOn(recpt);
}
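The replacement upload path reads as one unit, and one change in it is easy to miss: imageReadyToRead now names different source and destination queue families (m_TransferQueueFamily vs m_PrimaryQueueFamily), so the barrier is a genuine queue-family ownership transfer from the async transfer queue to the graphics queue, where the old same-family form was a no-op transfer. Condensed from the new lines above:

auto imageStaging = device.CreateStagingBuffer(imageFile.GetSize(), "Image Staging");
imageStaging->Write(0, imageFile.GetSize(), imageFile.m_Data);
auto context = device.CreateTransferContext();
context.Begin();
context.Dependency(imageReadyToWriteDependency);  // Undefined -> TransferDstOptimal
context.UploadTexture(crate->m_Image, imageFile); // pixel data arrives via ImageFile's span conversion
context.Dependency(imageReadyToReadDependency);   // release to the graphics queue family
context.End();
auto recpt = device.Submit(context);
device.WaitOn(recpt); // block until the transfer queue finishes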
vk::Sampler sampler;
{
vk::SamplerCreateInfo samplerCreateInfo = {
.magFilter = vk::Filter::eLinear,
.minFilter = vk::Filter::eLinear,
.mipmapMode = vk::SamplerMipmapMode::eLinear,
.addressModeU = vk::SamplerAddressMode::eRepeat,
.addressModeV = vk::SamplerAddressMode::eRepeat,
.addressModeW = vk::SamplerAddressMode::eRepeat,
.mipLodBias = 0.2f,
.anisotropyEnable = true,
.maxAnisotropy = 1.0f,
.compareEnable = false,
.minLod = 0,
.maxLod = 4,
.unnormalizedCoordinates = false,
};
AbortIfFailed(device.m_Device.createSampler(&samplerCreateInfo, nullptr, &sampler));
}
auto ubo = bufferManager.CreateUniformBuffer(sizeof camera, "Camera UBO").ToPointer();
ubo->Write(&device, 0, sizeof camera, &camera);
vk::DescriptorBufferInfo descriptorBufferInfo = {
.buffer = ubo->m_Buffer,
.offset = 0,
.range = ubo->GetSize(),
};
vk::DescriptorImageInfo descriptorImageInfo = {
.sampler = sampler,
.imageView = crate->m_View,
.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
};
vk::DescriptorBufferInfo descriptorStorageBufferInfo = {
.buffer = vbo->m_Buffer,
.offset = 0,
.range = vbo->GetSize(),
};
eastl::array writeDescriptors = {
vk::WriteDescriptorSet{
.dstSet = descriptorSet,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.pBufferInfo = &descriptorBufferInfo,
},
vk::WriteDescriptorSet{
.dstSet = descriptorSet,
.dstBinding = 1,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.pImageInfo = &descriptorImageInfo,
},
vk::WriteDescriptorSet{
.dstSet = descriptorSet,
.dstBinding = 2,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.pBufferInfo = &descriptorStorageBufferInfo,
},
};
device.m_Device.updateDescriptorSets(Cast<u32>(writeDescriptors.size()), writeDescriptors.data(), 0, nullptr);
auto ubo = device.CreateStorageBuffer(sizeof camera, "Camera UBO");
ubo->Write(0, sizeof camera, &camera);
// Persistent variables
vk::Viewport viewport = {
.x = 0,
.y = Cast<f32>(swapchain.m_Extent.height),
.width = Cast<f32>(swapchain.m_Extent.width),
.height = -Cast<f32>(swapchain.m_Extent.height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = swapchain.m_Extent,
};
auto resizeViewportScissor = [&viewport, &scissor](vk::Extent2D extent) {
viewport.y = Cast<f32>(extent.height);
viewport.width = Cast<f32>(extent.width);
viewport.height = -Cast<f32>(extent.height);
scissor.extent = extent;
};
swapchain.RegisterResizeCallback(resizeViewportScissor);
vk::ImageSubresourceRange subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -437,8 +288,8 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eColorAttachmentOptimal,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo topOfThePipeDependency = {
@@ -452,8 +303,8 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eNone,
.oldLayout = vk::ImageLayout::eColorAttachmentOptimal,
.newLayout = vk::ImageLayout::ePresentSrcKHR,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo renderToPresentDependency = {
@@ -461,50 +312,85 @@ main(int, char **)
.pImageMemoryBarriers = &renderToPresentBarrier,
};
FrameManager frameManager = {&device, queueAllocation.m_Family, MAX_FRAMES_IN_FLIGHT};
eastl::fixed_vector<Ref<Image>, MAX_FRAMES_IN_FLIGHT> depthImages;
eastl::fixed_vector<Ref<ImageView>, MAX_FRAMES_IN_FLIGHT> depthImages;
auto initDepthImages = [&imageManager, &depthImages, &frameManager] (const vk::Extent2D extent) {
for (u32 i = 0; i < frameManager.m_FramesInFlight; ++i)
auto initDepthImages = [&depthImages, &device](vk::Extent2D const extent) {
for (u32 i = 0; i < MAX_FRAMES_IN_FLIGHT; ++i)
{
depthImages.push_back(
imageManager.CreateDepthStencilImage({.m_Extent = extent, .m_Name = "Depth"}).ToPointer());
depthImages.push_back(device.CreateDepthStencilImageWithView({.m_Extent = extent, .m_Name = "Depth"}));
}
};
initDepthImages(swapchain.m_Extent);
initDepthImages(swapchainSize);
auto recreateDepthBuffers = [&depthImages, &initDepthImages](const vk::Extent2D extent) {
auto recreateDepthBuffers = [&depthImages, &initDepthImages](vk::Extent2D const extent) {
depthImages.clear();
initDepthImages(extent);
};
swapchain.RegisterResizeCallback(recreateDepthBuffers);
struct PCB
{
uptr m_VertexBuffer;
uptr m_Camera;
systems::ResId<TextureView> m_Texture;
};
static_assert(sizeof(PCB) == 24);
auto &commitManager = systems::CommitManager::Instance();
PCB pcb = {
.m_VertexBuffer = vbo->GetDeviceAddress(),
.m_Camera = ubo->GetDeviceAddress(),
.m_Texture = commitManager.CommitTexture(crate),
};
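This one struct replaces the descriptor pool, set layout, and updateDescriptorSets plumbing the old sample needed: the shader reaches vertex and camera data through 64-bit buffer device addresses and the texture through a CommitManager slot, which is what the static_assert pins down (8 + 8 + an 8-byte, or padded-to-8, ResId — inferred from the assert, not from the ResId definition). Per frame, resource binding then reduces to the two calls used later in the loop:

context.BindPipeline(pipeline);
context.PushConstantBlock(pcb); // 24 bytes of push constants carry every resource handle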
Time::Init();
auto prevSwapchainSize = swapchainSize;
INFO("Starting loop");
while (window.Poll())
{
Time::Update();
camera.m_Model *= rotate(mat4{1.0f}, Cast<f32>(45.0_deg * Time::m_Delta), vec3(0.0f, 1.0f, 0.0f));
ubo->Write(&device, 0, sizeof camera, &camera);
camera.m_Model *= rotate(mat4{1.0f}, static_cast<f32>(45.0_deg * Time::m_Delta), vec3(0.0f, 1.0f, 0.0f));
ubo->Write(0, sizeof camera, &camera);
Frame *currentFrame = frameManager.GetNextFrame(&swapchain, &surface, window.GetSize());
auto &currentFrame = device.GetNextFrame();
u32 imageIndex = currentFrame->m_ImageIdx;
vk::ImageView currentImageView = swapchain.m_ImageViews[imageIndex];
vk::Image currentImage = swapchain.m_Images[imageIndex];
vk::CommandBuffer cmd = currentFrame->m_CommandBuffer;
vk::ImageView currentDepthImageView = depthImages[currentFrame->m_FrameIdx]->m_View;
prevSwapchainSize = swapchainSize;
swapchainSize = currentFrame.m_SwapchainSize;
if (swapchainSize != prevSwapchainSize)
{
recreateDepthBuffers(swapchainSize);
}
vk::Viewport viewport = {
.x = 0,
.y = static_cast<f32>(swapchainSize.m_Height),
.width = static_cast<f32>(swapchainSize.m_Width),
.height = -static_cast<f32>(swapchainSize.m_Height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = static_cast<vk::Extent2D>(swapchainSize),
};
vk::ImageView currentImageView = currentFrame.m_SwapchainImageView;
vk::Image currentImage = currentFrame.m_SwapchainImage;
vk::ImageView currentDepthImageView = depthImages[currentFrame.m_FrameIdx]->m_View;
topOfThePipeBarrier.image = currentImage;
renderToPresentBarrier.image = currentImage;
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
AbortIfFailed(cmd.begin(&beginInfo));
auto context = currentFrame.CreateGraphicsContext();
cmd.pipelineBarrier2(&topOfThePipeDependency);
context.Begin();
context.Dependency(topOfThePipeDependency);
// Render
eastl::array attachmentInfos = {
@@ -528,207 +414,30 @@ main(int, char **)
};
vk::RenderingInfo renderingInfo = {
.renderArea = {.extent = swapchain.m_Extent},
.renderArea = scissor,
.layerCount = 1,
.colorAttachmentCount = Cast<u32>(attachmentInfos.size()),
.colorAttachmentCount = static_cast<u32>(attachmentInfos.size()),
.pColorAttachments = attachmentInfos.data(),
.pDepthAttachment = &depthAttachment,
};
cmd.beginRendering(&renderingInfo);
context.BeginRendering(renderingInfo);
cmd.setViewport(0, 1, &viewport);
cmd.setScissor(0, 1, &scissor);
cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1, &descriptorSet, 0, nullptr);
cmd.draw(Cast<u32>(vertices.size()), 1, 0, 0);
context.SetViewport(viewport);
context.BindPipeline(pipeline);
context.PushConstantBlock(pcb);
context.Draw(vertices.size());
cmd.endRendering();
context.EndRendering();
cmd.pipelineBarrier2(&renderToPresentDependency);
context.Dependency(renderToPresentDependency);
AbortIfFailed(cmd.end());
context.End();
vk::PipelineStageFlags waitDstStage = vk::PipelineStageFlagBits::eColorAttachmentOutput;
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame->m_ImageAcquireSem,
.pWaitDstStageMask = &waitDstStage,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
.signalSemaphoreCount = 1,
.pSignalSemaphores = &currentFrame->m_RenderFinishSem,
};
AbortIfFailed(commandQueue.submit(1, &submitInfo, currentFrame->m_FrameAvailableFence));
currentFrame->Present(commandQueue, &swapchain, &surface, window.GetSize());
device.Present(currentFrame, context);
}
device.WaitIdle();
device.m_Device.destroy(sampler, nullptr);
device.m_Device.destroy(descriptorPool, nullptr);
device.m_Device.destroy(copyPool, nullptr);
return 0;
}
Pipeline
CreatePipeline(const Device *device, const Swapchain *swapchain)
{
// Pipeline Setup
auto vertexShaderModule = CreateShader(device, VERTEX_SHADER_FILE);
auto fragmentShaderModule = CreateShader(device, FRAGMENT_SHADER_FILE);
eastl::array<vk::PipelineShaderStageCreateInfo, 2> shaderStages = {{
{
.stage = vk::ShaderStageFlagBits::eVertex,
.module = vertexShaderModule,
.pName = "main",
},
{
.stage = vk::ShaderStageFlagBits::eFragment,
.module = fragmentShaderModule,
.pName = "main",
},
}};
eastl::array descriptorSetLayoutBinding = {
vk::DescriptorSetLayoutBinding{
.binding = 0,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eVertex,
},
vk::DescriptorSetLayoutBinding{
.binding = 1,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eFragment,
},
vk::DescriptorSetLayoutBinding{
.binding = 2,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eVertex,
},
};
vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
.bindingCount = Cast<u32>(descriptorSetLayoutBinding.size()),
.pBindings = descriptorSetLayoutBinding.data(),
};
vk::DescriptorSetLayout descriptorSetLayout;
AbortIfFailed(
device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout));
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
.setLayoutCount = 1,
.pSetLayouts = &descriptorSetLayout,
.pushConstantRangeCount = 0,
.pPushConstantRanges = nullptr,
};
vk::PipelineLayout pipelineLayout;
AbortIfFailed(device->m_Device.createPipelineLayout(&pipelineLayoutCreateInfo, nullptr, &pipelineLayout));
device->SetName(pipelineLayout, "Box Layout");
vk::PipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {};
vk::PipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
.topology = vk::PrimitiveTopology::eTriangleList,
.primitiveRestartEnable = false,
};
vk::PipelineViewportStateCreateInfo viewportStateCreateInfo = {
.viewportCount = 1,
.scissorCount = 1,
};
vk::PipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
.depthClampEnable = false,
.rasterizerDiscardEnable = false,
.polygonMode = vk::PolygonMode::eFill,
.cullMode = vk::CullModeFlagBits::eNone,
.frontFace = vk::FrontFace::eCounterClockwise,
.depthBiasEnable = false,
.lineWidth = 1.0,
};
vk::PipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
.rasterizationSamples = vk::SampleCountFlagBits::e1,
.sampleShadingEnable = false,
};
vk::PipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo = {
.depthTestEnable = true,
.depthWriteEnable = true,
.depthCompareOp = vk::CompareOp::eLess,
};
vk::PipelineColorBlendAttachmentState colorBlendAttachmentState = {
.blendEnable = false,
.srcColorBlendFactor = vk::BlendFactor::eSrcColor,
.dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcColor,
.colorBlendOp = vk::BlendOp::eAdd,
.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha,
.dstAlphaBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha,
.alphaBlendOp = vk::BlendOp::eAdd,
.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA,
};
vk::PipelineColorBlendStateCreateInfo colorBlendStateCreateInfo = {
.logicOpEnable = false,
.attachmentCount = 1,
.pAttachments = &colorBlendAttachmentState,
};
eastl::array dynamicStates = {
vk::DynamicState::eScissor,
vk::DynamicState::eViewport,
};
vk::PipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
.dynamicStateCount = Cast<u32>(dynamicStates.size()),
.pDynamicStates = dynamicStates.data(),
};
vk::PipelineRenderingCreateInfo renderingCreateInfo = {
.viewMask = 0,
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &swapchain->m_Format,
.depthAttachmentFormat = vk::Format::eD24UnormS8Uint,
};
vk::GraphicsPipelineCreateInfo pipelineCreateInfo = {
.pNext = &renderingCreateInfo,
.stageCount = Cast<u32>(shaderStages.size()),
.pStages = shaderStages.data(),
.pVertexInputState = &vertexInputStateCreateInfo,
.pInputAssemblyState = &inputAssemblyStateCreateInfo,
.pViewportState = &viewportStateCreateInfo,
.pRasterizationState = &rasterizationStateCreateInfo,
.pMultisampleState = &multisampleStateCreateInfo,
.pDepthStencilState = &depthStencilStateCreateInfo,
.pColorBlendState = &colorBlendStateCreateInfo,
.pDynamicState = &dynamicStateCreateInfo,
.layout = pipelineLayout,
};
vk::Pipeline pipeline;
AbortIfFailed(device->m_Device.createGraphicsPipelines(nullptr, 1, &pipelineCreateInfo, nullptr, &pipeline));
device->SetName(pipeline, "Box Pipeline");
device->m_Device.destroy(vertexShaderModule, nullptr);
device->m_Device.destroy(fragmentShaderModule, nullptr);
return {device, pipelineLayout, pipeline, {descriptorSetLayout}};
}
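Call sites now reduce to a single line, since CreatePipeline owns the layout, the fixed-function state, and the shader-module lifetime (a sketch; `device` and `swapchain` as in main above):
Pipeline pipeline = CreatePipeline(&device, &swapchain);
// pipeline.m_Pipeline and pipeline.m_Layout are ready to bind;
// both vk::ShaderModules were already destroyed inside CreatePipeline.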
vk::ShaderModule
CreateShader(const Device *device, cstr shaderFile)
{
eastl::vector<u32> shaderCode = ReadFile(shaderFile);
const vk::ShaderModuleCreateInfo shaderModuleCreateInfo = {
.codeSize = shaderCode.size() * sizeof(u32),
.pCode = shaderCode.data(),
};
vk::ShaderModule shaderModule;
AbortIfFailedMV(device->m_Device.createShaderModule(&shaderModuleCreateInfo, nullptr, &shaderModule),
"Shader {} could not be created.", shaderFile);
return shaderModule;
}


@@ -0,0 +1,23 @@
[vk::binding(0, 0)] __DynamicResource<__DynamicResourceKind.General> gBuffers[];
[vk::binding(1, 0)] __DynamicResource<__DynamicResourceKind.Sampler> gSamplers[];
[vk::binding(2, 0)] __DynamicResource<__DynamicResourceKind.General> gStorageTextures[];
export T getDescriptorFromHandle<T>(DescriptorHandle<T> handle) where T : IOpaqueDescriptor
{
__target_switch
{
case spirv:
switch (T.kind) {
case DescriptorKind.Buffer:
return gBuffers[((uint2)handle).x].asOpaqueDescriptor<T>();
case DescriptorKind.CombinedTextureSampler:
return gSamplers[((uint2)handle).x].asOpaqueDescriptor<T>();
case DescriptorKind.Texture:
return gStorageTextures[((uint2)handle).x].asOpaqueDescriptor<T>();
default:
return defaultGetDescriptorFromHandle(handle);
}
default:
return defaultGetDescriptorFromHandle(handle);
}
}
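This override reroutes every DescriptorHandle lookup through three descriptor arrays at set 0, so the engine side needs a matching bindless set layout. A minimal sketch of what that layout could look like in vulkan-hpp; the array size kMaxResources and the descriptor-indexing flags are assumptions, not shown in this diff:
constexpr u32 kMaxResources = 16 * 1024; // hypothetical upper bound
eastl::array bindings = {
    // binding 0: general resources read as buffers (gBuffers)
    vk::DescriptorSetLayoutBinding{.binding = 0,
                                   .descriptorType = vk::DescriptorType::eStorageBuffer,
                                   .descriptorCount = kMaxResources,
                                   .stageFlags = vk::ShaderStageFlagBits::eAll},
    // binding 1: combined texture/sampler handles (gSamplers)
    vk::DescriptorSetLayoutBinding{.binding = 1,
                                   .descriptorType = vk::DescriptorType::eCombinedImageSampler,
                                   .descriptorCount = kMaxResources,
                                   .stageFlags = vk::ShaderStageFlagBits::eAll},
    // binding 2: storage images (gStorageTextures)
    vk::DescriptorSetLayoutBinding{.binding = 2,
                                   .descriptorType = vk::DescriptorType::eStorageImage,
                                   .descriptorCount = kMaxResources,
                                   .stageFlags = vk::ShaderStageFlagBits::eAll},
};
// Runtime-sized arrays also need ePartiallyBound | eUpdateAfterBind binding flags
// (via vk::DescriptorSetLayoutBindingFlagsCreateInfo) on each of the three bindings.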


@@ -1,11 +0,0 @@
#version 450
#pragma shader_stage(fragment)
layout (location = 0) in vec2 inUV;
layout (location = 0) out vec4 outColor;
layout(binding = 1) uniform sampler2D tex;
void main() {
outColor = vec4(texture(tex, inUV).rgb, 1.0f);
}


@@ -1,19 +0,0 @@
struct FS_Input {
float2 UV0 : TEXCOORD0;
};
struct FS_Output
{
float4 ColorTarget : SV_Target0;
};
[[vk::binding(1, 0)]] Texture2D<float4> Texture;
[[vk::binding(1, 0)]] SamplerState Sampler;
FS_Output main(FS_Input StageInput) {
FS_Output output;
output.ColorTarget = float4(Texture.Sample(Sampler, StageInput.UV0).rgb, 1.0);
return output;
}


@@ -0,0 +1,57 @@
import bindless;
struct VertexData
{
float4 position;
float2 texCoord0;
float2 _pad0;
};
struct CameraData
{
float4x4 model;
float4x4 view;
float4x4 projection;
};
struct PCB {
VertexData* vertexBuffer;
CameraData* cameraBuffer;
Sampler2D.Handle texture;
};
[vk::push_constant]
uniform PCB pcb;
struct VSIn {
uint vertexIndex : SV_VertexID;
};
struct VSOut
{
float4 position : SV_POSITION;
float2 texCoord0 : TEXCOORD0;
};
struct FSOut {
float4 Color;
};
[shader("vertex")]
func vsmain(VSIn input) -> VSOut {
VSOut output;
VertexData vd = pcb.vertexBuffer[input.vertexIndex];
output.position = mul(mul(mul(float4(vd.position.xyz, 1.0f), pcb.cameraBuffer->model), pcb.cameraBuffer->view), pcb.cameraBuffer->projection);
output.texCoord0 = vd.texCoord0;
return output;
}
[shader("fragment")]
func fsmain(VSOut input) -> FSOut {
FSOut outp;
outp.Color = float4(pcb.texture.Sample(input.texCoord0).rgb, 1.0);
return outp;
}
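The PCB block above is what context.PushConstantBlock(pcb) fills from the C++ side: the two pointer members are buffer device addresses, and Sampler2D.Handle is read as a uint2 by bindless.slang. A sketch of the mirror struct under those assumptions (field names are hypothetical; the sizes follow from the shader):
struct PCB
{
    vk::DeviceAddress m_VertexBuffer; // VertexData*  -> 8 bytes
    vk::DeviceAddress m_CameraBuffer; // CameraData*  -> 8 bytes
    u64 m_Texture;                    // Sampler2D.Handle, cast to uint2 in the shader
};
static_assert(sizeof(PCB) == 24, "must match the shader's push-constant layout");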


@@ -1,19 +0,0 @@
#version 450
#pragma shader_stage(vertex)
layout(location=0) in vec4 position;
layout(location=1) in vec2 uv0;
layout(location=0) out vec2 outUV;
layout(binding=0) uniform Camera {
mat4 model;
mat4 view;
mat4 proj;
} ubo;
void main() {
outUV = uv0;
gl_Position = ubo.proj * ubo.view * ubo.model * vec4(position.xyz, 1.0f);
// outColor = vec3(0.5f, 0.3f, 0.1f);
}


@@ -1,36 +0,0 @@
struct VS_Input
{
uint VertexIndex : SV_VertexID;
};
struct VS_Output
{
float2 UV0 : TEXCOORD0;
float4 VertexPosition : SV_Position;
};
struct CameraData {
float4x4 Model;
float4x4 View;
float4x4 Projection;
};
struct VertexData {
float4 Position;
float2 UV0;
};
[[vk::binding(0, 0)]] ConstantBuffer<CameraData> Camera;
[[vk::binding(2, 0)]] StructuredBuffer<VertexData> Vertices;
VS_Output main(VS_Input StageInput) {
VS_Output output;
output.UV0 = Vertices[StageInput.VertexIndex].UV0;
float4 position = Vertices[StageInput.VertexIndex].Position;
output.VertexPosition = mul(Camera.Projection, mul(Camera.View, mul(Camera.Model, position)));
return output;
}


@@ -5,28 +5,25 @@ cmake_minimum_required(VERSION 3.13)
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined -fsanitize=address")
find_path(TINYGLTF_INCLUDE_DIRS "tiny_gltf.h")
add_executable(model_render "model_render.cpp"
"pipeline_utils.cpp"
"pipeline_utils.h"
"asset_loader.cpp"
"asset_loader.h"
"light_manager.cpp"
"light_manager.h"
"gpu_resource_manager.cpp"
"gpu_resource_manager.h"
"nodes.cpp"
"nodes.h"
"ibl_helpers.cpp"
"ibl_helpers.h")
add_executable(model_render
"model_render.cpp"
"asset_loader.cpp"
"asset_loader.h"
"light_manager.cpp"
"light_manager.h"
"nodes.cpp"
"nodes.h"
"ibl_helpers.cpp"
"ibl_helpers.h"
"tiny_gltf_setup.cpp")
add_shader(model_render "shader/model.vs.hlsl")
add_shader(model_render "shader/model.ps.hlsl")
add_shader(model_render "shader/eqrect_to_cube.cs.hlsl")
add_shader(model_render "shader/background.vs.hlsl")
add_shader(model_render "shader/background.ps.hlsl")
add_shader(model_render "shader/diffuse_irradiance.cs.hlsl")
add_shader(model_render "shader/prefilter.cs.hlsl")
add_shader(model_render "shader/brdf_lut.cs.hlsl")
add_shader(model_render "shader/background.slang")
add_shader(model_render "shader/bindless.slang")
add_shader(model_render "shader/common_structs.slang")
add_shader(model_render "shader/environment.slang")
add_shader(model_render "shader/eqrect_to_cube.slang")
add_shader(model_render "shader/ibl_common.slang")
add_shader(model_render "shader/model.slang")
target_link_libraries(model_render PRIVATE aster_core)
target_link_libraries(model_render PRIVATE util_helper)


@@ -1,27 +1,26 @@
// =============================================
// Aster: asset_loader.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#define TINYGLTF_NOEXCEPTION
#define JSON_NOEXCEPTION
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "aster/core/buffer.h"
#include "aster/core/device.h"
#include "aster/core/image.h"
#include "gpu_resource_manager.h"
#include "helpers.h"
#include "asset_loader.h"
#include "helpers.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
#include <EASTL/fixed_vector.h>
#include <EASTL/hash_map.h>
#include <glm/gtc/type_ptr.hpp>
#include <filesystem>
#include <stb_image.h>
#include <tiny_gltf.h>
#if defined(LoadImage)
@@ -31,19 +30,29 @@
constexpr vk::CommandBufferBeginInfo OneTimeCmdBeginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
vec4
VectorToVec4(const std::vector<double> &vec)
VectorToVec4(std::vector<double> const &vec)
{
if (vec.empty())
{
return vec4{0.0f};
}
assert(vec.size() == 4);
return {vec[0], vec[1], vec[2], vec[3]};
}
vec4
VectorToVec4(std::vector<double> const &vec, float w)
{
if (vec.empty())
{
return vec4{0.0f};
}
assert(vec.size() == 3);
return {vec[0], vec[1], vec[2], w};
}
vec3
VectorToVec3(const std::vector<double> &vec)
VectorToVec3(std::vector<double> const &vec)
{
if (vec.empty())
{
@@ -54,51 +63,28 @@ VectorToVec3(const std::vector<double> &vec)
return {vec[0], vec[1], vec[2]};
}
void
AssetLoader::LoadHdrImage(Texture *texture, cstr path, cstr name) const
Ref<TextureView>
AssetLoader::LoadHdrImage(cstr path, cstr name) const
{
const Device *pDevice = m_ResourceManager->m_Device;
ERROR_IF(texture->IsValid(), "Expected invalid image.") THEN_ABORT(-1);
i32 x, y, nChannels;
f32 *data = stbi_loadf(path, &x, &y, &nChannels, 4);
assert(nChannels == 3);
ERROR_IF(!data, "Could not load {}", path) THEN_ABORT(-1);
u32 width = Cast<u32>(x);
u32 height = Cast<u32>(y);
u32 width = static_cast<u32>(x);
u32 height = static_cast<u32>(y);
StagingBuffer stagingBuffer;
texture->Init(m_ResourceManager->m_Device, {width, height}, vk::Format::eR32G32B32A32Sfloat, false, path);
assert(texture->IsValid());
stagingBuffer.Init(m_ResourceManager->m_Device, (sizeof *data) * x * y * 4, "HDR Staging Buffer");
stagingBuffer.Write(m_ResourceManager->m_Device, 0, stagingBuffer.GetSize(), data);
stbi_image_free(data);
auto texture = m_Device->CreateTexture2DWithView({
.m_Format = vk::Format::eR32G32B32A32Sfloat,
.m_Extent = {width, height},
.m_Name = path,
.m_IsSampled = true,
.m_IsMipMapped = false,
.m_IsStorage = false,
});
#pragma region Setup Copy/Sync primitives
vk::BufferImageCopy2 copyRegion = {
.bufferOffset = 0,
.bufferRowLength = width,
.bufferImageHeight = height,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {0, 0, 0},
.imageExtent = texture->m_Extent,
};
vk::CopyBufferToImageInfo2 stagingInfo = {
.srcBuffer = stagingBuffer.m_Buffer,
.dstImage = texture->m_Image,
.dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
.regionCount = 1,
.pRegions = &copyRegion,
};
vk::ImageMemoryBarrier2 readyToStageBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eNone,
@@ -108,7 +94,7 @@ AssetLoader::LoadHdrImage(Texture *texture, cstr path, cstr name) const
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture->m_Image,
.image = texture->GetImage(),
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -131,9 +117,9 @@ AssetLoader::LoadHdrImage(Texture *texture, cstr path, cstr name) const
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
.srcQueueFamilyIndex = m_TransferQueueIndex,
.dstQueueFamilyIndex = m_GraphicsQueueIndex,
.image = texture->m_Image,
.srcQueueFamilyIndex = m_Device->m_TransferQueueFamily,
.dstQueueFamilyIndex = m_Device->m_PrimaryQueueFamily,
.image = texture->GetImage(),
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -151,49 +137,31 @@ AssetLoader::LoadHdrImage(Texture *texture, cstr path, cstr name) const
};
#pragma endregion
AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo));
auto context = m_Device->CreateTransferContext();
context.Begin();
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += name ? name : path;
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
context.BeginDebugRegion(loadActionName.c_str());
m_CommandBuffer.pipelineBarrier2(&readyToStageDependency);
m_CommandBuffer.copyBufferToImage2(&stagingInfo);
m_CommandBuffer.pipelineBarrier2(&postStagingDependency);
context.Dependency(readyToStageDependency);
context.UploadTexture(texture->m_Image, {reinterpret_cast<u8 *>(data), (sizeof *data) * x * y * 4});
context.Dependency(postStagingDependency);
#if !defined(ASTER_NDEBUG)
m_CommandBuffer.endDebugUtilsLabelEXT();
#endif
context.EndDebugRegion();
AbortIfFailed(m_CommandBuffer.end());
context.End();
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &m_CommandBuffer,
};
auto rcpt = m_Device->Submit(context);
stbi_image_free(data);
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u32>));
pDevice->m_Device.destroy(fence, nullptr);
m_Device->WaitOn(rcpt);
AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {}));
stagingBuffer.Destroy(pDevice);
return texture;
}
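LoadHdrImage is now fully synchronous: it records into a fresh TransferContext, submits, and blocks on the receipt before returning the committed view. A sketch of a call site (the path and label are placeholders):
Ref<TextureView> envMap = assetLoader.LoadHdrImage("assets/studio.hdr", "Environment HDR");
// Safe to sample immediately: the upload was already fenced via WaitOn(rcpt).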
void
GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayout initialLayout,
GenerateMipMaps(systems::TransferContext &context, Ref<Texture> const &texture, vk::ImageLayout initialLayout,
vk::ImageLayout finalLayout, vk::PipelineStageFlags2 prevStage, vk::PipelineStageFlags2 finalStage)
{
#if !defined(ASTER_NDEBUG)
@@ -201,7 +169,7 @@ GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayo
.pLabelName = "Generate Mipmap",
.color = std::array{0.9f, 0.9f, 0.9f, 1.0f},
};
commandBuffer.beginDebugUtilsLabelEXT(&label);
context.BeginDebugRegion("Generate MipMap", {0.9, 0.9, 0.9, 1.0});
#endif
vk::ImageMemoryBarrier2 imageStartBarrier = {
@@ -243,7 +211,7 @@ GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayo
}
vk::DependencyInfo imageStartDependency = {
.imageMemoryBarrierCount = Cast<u32>(startBarriers.size()),
.imageMemoryBarrierCount = static_cast<u32>(startBarriers.size()),
.pImageMemoryBarriers = startBarriers.data(),
};
@@ -324,10 +292,10 @@ GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayo
// Mip Mapping
commandBuffer.pipelineBarrier2(&imageStartDependency);
context.Dependency(imageStartDependency);
i32 prevMipWidth = Cast<i32>(texture->m_Extent.width);
i32 prevMipHeight = Cast<i32>(texture->m_Extent.height);
i32 prevMipWidth = static_cast<i32>(texture->m_Extent.width);
i32 prevMipHeight = static_cast<i32>(texture->m_Extent.height);
u32 maxPrevMip = texture->GetMipLevels() - 1;
for (u32 prevMipLevel = 0; prevMipLevel < maxPrevMip; ++prevMipLevel)
@@ -349,47 +317,50 @@ GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayo
nextMipBarrier.subresourceRange.baseMipLevel = currentMipLevel;
commandBuffer.blitImage2(&mipBlitInfo);
commandBuffer.pipelineBarrier2(&interMipDependency);
context.Blit(mipBlitInfo);
context.Dependency(interMipDependency);
prevMipHeight = currentMipHeight;
prevMipWidth = currentMipWidth;
}
commandBuffer.pipelineBarrier2(&imageReadyDependency);
context.Dependency(imageReadyDependency);
#if !defined(ASTER_NDEBUG)
commandBuffer.endDebugUtilsLabelEXT();
context.EndDebugRegion();
#endif
}
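GenerateMipMaps records its blits and barriers into the same TransferContext as the upload, so the typical sequence (as LoadImageToGpu uses below) is: stage mip 0, transition it, then let the blit chain fan it out. A sketch with placeholder pixel data:
context.UploadTexture(texture, {pixels, byteSize}); // fills mip 0 (eTransferDstOptimal)
context.Dependency(postStagingDependency);          // mip 0 -> eTransferSrcOptimal
GenerateMipMaps(context, texture, vk::ImageLayout::eTransferSrcOptimal,
                vk::ImageLayout::eShaderReadOnlyOptimal); // blits each mip, then transitions all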
TextureHandle
AssetLoader::LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image, bool isSrgb) const
systems::ResId<TextureView>
AssetLoader::LoadImageToGpu(systems::TransferContext &context, tinygltf::Image *image, bool isSrgb, cstr name) const
{
// TODO: Something is not loading properly.
assert(image->component == 4);
assert(image->height > 0 && image->width > 0);
u32 height = Cast<u32>(image->height);
u32 width = Cast<u32>(image->width);
#if !defined(ASTER_NDEBUG)
auto assignedName = name ? name : image->name.empty() ? image->uri.c_str() : image->name.c_str();
#else
auto assignedName = nullptr;
#endif
u32 height = static_cast<u32>(image->height);
u32 width = static_cast<u32>(image->width);
vk::Format imageFormat = isSrgb ? vk::Format::eR8G8B8A8Srgb : vk::Format::eR8G8B8A8Unorm;
Texture texture;
auto texture = m_Device->CreateTexture2D<Texture>({
.m_Format = imageFormat,
.m_Extent = {width, height},
.m_Name = assignedName,
.m_IsSampled = true,
.m_IsMipMapped = true,
.m_IsStorage = false,
});
usize byteSize = image->image.size();
texture.Init(m_ResourceManager->m_Device, {.width = width, .height = height}, imageFormat, true,
image->name.data());
stagingBuffer->Init(m_ResourceManager->m_Device, byteSize);
stagingBuffer->Write(m_ResourceManager->m_Device, 0, byteSize, image->image.data());
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += image->name.empty() ? "<texture>" : image->name.c_str();
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
loadActionName += assignedName;
context.BeginDebugRegion(loadActionName.c_str());
#pragma region Barriers and Blits
@@ -402,7 +373,7 @@ AssetLoader::LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture.m_Image,
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -421,12 +392,14 @@ AssetLoader::LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image
vk::ImageMemoryBarrier2 postStagingBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eTransferSrcOptimal,
.srcQueueFamilyIndex = m_TransferQueueIndex,
.dstQueueFamilyIndex = m_GraphicsQueueIndex,
.image = texture.m_Image,
.srcQueueFamilyIndex = m_Device->m_TransferQueueFamily,
.dstQueueFamilyIndex = m_Device->m_PrimaryQueueFamily,
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -436,49 +409,28 @@ AssetLoader::LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image
.layerCount = 1,
},
};
vk::DependencyInfo postStagingDependency = {
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &postStagingBarrier,
};
vk::BufferImageCopy2 imageCopy = {
.bufferOffset = 0,
.bufferRowLength = Cast<u32>(image->width),
.bufferImageHeight = Cast<u32>(image->height),
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = texture.m_Extent,
};
vk::CopyBufferToImageInfo2 stagingCopyInfo = {
.srcBuffer = stagingBuffer->m_Buffer,
.dstImage = texture.m_Image,
.dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
.regionCount = 1,
.pRegions = &imageCopy,
};
#pragma endregion
m_CommandBuffer.pipelineBarrier2(&imageStartDependency);
m_CommandBuffer.copyBufferToImage2(&stagingCopyInfo);
m_CommandBuffer.pipelineBarrier2(&postStagingDependency);
context.Dependency(imageStartDependency);
context.UploadTexture(texture, {image->image.data(), image->image.size()});
context.Dependency(postStagingDependency);
GenerateMipMaps(m_CommandBuffer, &texture, vk::ImageLayout::eTransferSrcOptimal,
vk::ImageLayout::eShaderReadOnlyOptimal);
GenerateMipMaps(context, texture, vk::ImageLayout::eTransferSrcOptimal, vk::ImageLayout::eShaderReadOnlyOptimal);
#if !defined(ASTER_NDEBUG)
m_CommandBuffer.endDebugUtilsLabelEXT();
context.EndDebugRegion();
#endif
return m_ResourceManager->CommitTexture(&texture);
auto textureView = m_Device->CreateView<TextureView>(
{.m_Image = texture, .m_Name = image->name.data(), .m_AspectMask = vk::ImageAspectFlagBits::eColor});
return m_Device->m_CommitManager->CommitTexture(textureView);
}
Model
@@ -488,10 +440,8 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
tinygltf::Model model;
tinygltf::TinyGLTF loader;
const Device *pDevice = m_ResourceManager->m_Device;
const auto fsPath = fs::absolute(path);
const auto ext = fsPath.extension();
auto const fsPath = fs::absolute(path);
auto const ext = fsPath.extension();
if (ext == GLTF_ASCII_FILE_EXTENSION)
{
std::string err;
@@ -513,42 +463,36 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
}
AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo));
auto context = m_Device->CreateTransferContext();
context.Begin();
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += name ? name : path;
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
context.BeginDebugRegion(loadActionName.c_str());
eastl::vector<StagingBuffer> stagingBuffers;
eastl::hash_map<i32, TextureHandle> textureHandleMap;
eastl::hash_map<i32, systems::ResId<TextureView>> textureHandleMap;
eastl::vector<Material> materials;
StorageBuffer materialsBuffer;
BufferHandle materialsHandle;
Ref<Buffer> materialsBuffer;
if (!model.materials.empty())
{
auto getTextureHandle = [this, &textureHandleMap, &stagingBuffers, &model](i32 index,
bool isSrgb) -> TextureHandle {
// TODO("Something broken on load here.");
auto getTextureHandle = [this, &context, &textureHandleMap,
&model](i32 index, bool const isSrgb) -> systems::ResId<TextureView> {
if (index < 0)
{
return {};
return systems::NullId{};
}
const auto iter = textureHandleMap.find(index);
if (iter != textureHandleMap.end())
if (auto const iter = textureHandleMap.find(index); iter != textureHandleMap.end())
{
return iter->second;
}
auto *image = &model.images[index];
TextureHandle handle = LoadImageToGpu(&stagingBuffers.push_back(), image, isSrgb);
auto const &texture = model.textures[index];
auto *image = &model.images[texture.source];
auto handle = LoadImageToGpu(context, image, isSrgb, texture.name.empty() ? nullptr : texture.name.c_str());
textureHandleMap.emplace(index, handle);
return handle;
};
@@ -558,28 +502,22 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
{
materials.push_back({
.m_AlbedoFactor = VectorToVec4(material.pbrMetallicRoughness.baseColorFactor),
.m_EmissionFactor = VectorToVec3(material.emissiveFactor),
.m_MetalFactor = Cast<f32>(material.pbrMetallicRoughness.metallicFactor),
.m_RoughFactor = Cast<f32>(material.pbrMetallicRoughness.roughnessFactor),
.m_EmissionFactor = VectorToVec4(material.emissiveFactor, 0.0f),
.m_AlbedoTex = getTextureHandle(material.pbrMetallicRoughness.baseColorTexture.index, true),
.m_NormalTex = getTextureHandle(material.normalTexture.index, false),
.m_MetalRoughTex =
getTextureHandle(material.pbrMetallicRoughness.metallicRoughnessTexture.index, false),
.m_OcclusionTex = getTextureHandle(material.occlusionTexture.index, false),
.m_EmissionTex = getTextureHandle(material.emissiveTexture.index, true),
.m_MetalFactor = static_cast<f32>(material.pbrMetallicRoughness.metallicFactor),
.m_RoughFactor = static_cast<f32>(material.pbrMetallicRoughness.roughnessFactor),
});
}
usize materialsByteSize = materials.size() * sizeof materials[0];
materialsBuffer.Init(pDevice, materialsByteSize, false, name);
materialsHandle = m_ResourceManager->Commit(&materialsBuffer);
materialsBuffer = m_Device->CreateStorageBuffer(materialsByteSize, name);
StagingBuffer &materialStaging = stagingBuffers.push_back();
materialStaging.Init(pDevice, materialsByteSize);
materialStaging.Write(pDevice, 0, materialsByteSize, materials.data());
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = materialsByteSize};
m_CommandBuffer.copyBuffer(materialStaging.m_Buffer, materialsBuffer.m_Buffer, 1, &bufferCopy);
context.UploadBuffer(materialsBuffer, materials);
}
// TODO: Mesh reordering based on nodes AND OR meshoptimizer
@@ -618,17 +556,17 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
tinygltf::Buffer *posBuffer = &model.buffers[posBufferView->buffer];
usize byteOffset = (posAccessor->byteOffset + posBufferView->byteOffset);
vertexCount = Cast<u32>(posAccessor->count);
vertexCount = static_cast<u32>(posAccessor->count);
vertexPositions.reserve(vertexOffset + vertexCount);
if (posAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(posBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec4 *>(posBuffer->data.data() + byteOffset);
vertexPositions.insert(vertexPositions.end(), data, data + vertexCount);
}
else if (posAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(posBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec3 *>(posBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
vertexPositions.push_back(vec4(data[i], 1.0f));
@@ -636,7 +574,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
else if (posAccessor->type == TINYGLTF_TYPE_VEC2)
{
vec2 *data = Recast<vec2 *>(posBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec2 *>(posBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
vertexPositions.push_back(vec4(data[i], 0.0f, 1.0f));
@@ -660,7 +598,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
if (normAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(normBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec4 *>(normBuffer->data.data() + byteOffset);
vec4 *end = data + vertexCount;
u32 idx = vertexOffset;
@@ -672,7 +610,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
else if (normAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(normBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec3 *>(normBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto norm = vec4(data[i], 0.0f);
@@ -681,7 +619,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
else if (normAccessor->type == TINYGLTF_TYPE_VEC2)
{
vec2 *data = Recast<vec2 *>(normBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec2 *>(normBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto norm = vec4(data[i], 0.0f, 0.0f);
@@ -704,7 +642,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
assert(uvAccessor->type == TINYGLTF_TYPE_VEC2 &&
uvAccessor->componentType == TINYGLTF_COMPONENT_TYPE_FLOAT);
{
vec2 *data = Recast<vec2 *>(uvBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec2 *>(uvBuffer->data.data() + byteOffset);
vec2 *end = data + vertexCount;
u32 idx = vertexOffset;
vec2 *it = data;
@@ -727,7 +665,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
if (colorAccessor->type == TINYGLTF_TYPE_VEC4)
{
vec4 *data = Recast<vec4 *>(colorBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec4 *>(colorBuffer->data.data() + byteOffset);
vec4 *end = data + vertexCount;
u32 idx = vertexOffset;
@@ -739,7 +677,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
else if (colorAccessor->type == TINYGLTF_TYPE_VEC3)
{
vec3 *data = Recast<vec3 *>(colorBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<vec3 *>(colorBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto color = vec4(data[i], 1.0f);
@@ -760,22 +698,22 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
tinygltf::Buffer *indexBuffer = &model.buffers[indexBufferView->buffer];
usize byteOffset = (indexAccessor->byteOffset + indexBufferView->byteOffset);
indexCount = Cast<u32>(indexAccessor->count);
indexCount = static_cast<u32>(indexAccessor->count);
indices.reserve(indexOffset + indexCount);
if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT)
{
u32 *data = Recast<u32 *>(indexBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<u32 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT)
{
u16 *data = Recast<u16 *>(indexBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<u16 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE)
{
u8 *data = Recast<u8 *>(indexBuffer->data.data() + byteOffset);
auto data = reinterpret_cast<u8 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
}
@@ -810,12 +748,12 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
{
eastl::function<void(i32, i32)> processNode = [&processNode, &model, &nodes, &meshPrimRanges,
&meshPrimitives](i32 idx, i32 parent) -> void {
const auto *node = &model.nodes[idx];
auto const *node = &model.nodes[idx];
vec3 nodeTranslation = vec3{0.0f};
quat nodeRotation = quat{1.0f, 0.0f, 0.0f, 0.0f};
vec3 nodeScale = vec3{1.0f};
mat4 nodeMatrix = mat4{1.0f};
auto nodeTranslation = vec3{0.0f};
auto nodeRotation = quat{1.0f, 0.0f, 0.0f, 0.0f};
auto nodeScale = vec3{1.0f};
auto nodeMatrix = mat4{1.0f};
if (node->translation.size() == 3)
{
nodeTranslation = glm::make_vec3(node->translation.data());
@@ -833,21 +771,21 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
{
nodeMatrix = glm::make_mat4(node->matrix.data());
}
const mat4 transform = translate(mat4(1.0f), nodeTranslation) * mat4_cast(nodeRotation) *
mat4 const transform = translate(mat4(1.0f), nodeTranslation) * mat4_cast(nodeRotation) *
scale(mat4(1.0f), nodeScale) * nodeMatrix;
const i32 nodeArrayIndex = Cast<i32>(nodes.Add(transform, parent));
i32 const nodeArrayIndex = static_cast<i32>(nodes.Add(transform, parent));
if (node->mesh >= 0)
{
auto [start, count] = meshPrimRanges[node->mesh];
const auto end = start + count;
auto const end = start + count;
for (usize i = start; i != end; ++i)
{
meshPrimitives[i].m_TransformIdx = nodeArrayIndex;
}
}
for (const i32 child : node->children)
for (i32 const child : node->children)
{
processNode(child, nodeArrayIndex);
}
@@ -862,76 +800,46 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
nodes.Update();
StorageBuffer nodeBuffer;
nodeBuffer.Init(pDevice, nodes.GetGlobalTransformByteSize(), true);
nodeBuffer.Write(pDevice, 0, nodes.GetGlobalTransformByteSize(), nodes.GetGlobalTransformPtr());
BufferHandle nodeHandle = m_ResourceManager->Commit(&nodeBuffer);
auto nodeBuffer = m_Device->CreateStorageBuffer(nodes.GetGlobalTransformByteSize());
nodeBuffer->Write(0, nodes.GetGlobalTransformByteSize(), nodes.GetGlobalTransformPtr());
#pragma region Staging / Transfer / Uploads
BufferHandle positionBufferHandle;
BufferHandle vertexDataHandle;
IndexBuffer indexBuffer;
systems::ResId<Buffer> positionBufferHandle = systems::ResId<Buffer>::Null();
systems::ResId<Buffer> vertexDataHandle = systems::ResId<Buffer>::Null();
Ref<IndexBuffer> indexBuffer;
{
auto uploadBufferData = [cmd = this->m_CommandBuffer, &stagingBuffers, pDevice](const Buffer *buffer,
const void *data) {
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = buffer->GetSize()};
StagingBuffer &stagingBuffer = stagingBuffers.push_back();
stagingBuffer.Init(pDevice, bufferCopy.size);
stagingBuffer.Write(pDevice, 0, bufferCopy.size, data);
cmd.copyBuffer(stagingBuffer.m_Buffer, buffer->m_Buffer, 1, &bufferCopy);
};
auto positionBuffer = m_Device->CreateStorageBuffer(vertexPositions.size() * sizeof vertexPositions[0]);
context.UploadBuffer(positionBuffer, vertexPositions);
StorageBuffer positionBuffer;
positionBuffer.Init(pDevice, vertexPositions.size() * sizeof vertexPositions[0], false);
positionBufferHandle = m_ResourceManager->Commit(&positionBuffer);
uploadBufferData(&positionBuffer, vertexPositions.data());
auto vertexDataBuffer = m_Device->CreateStorageBuffer(vertexData.size() * sizeof vertexData[0]);
context.UploadBuffer(vertexDataBuffer, vertexData);
StorageBuffer vertexDataBuffer;
vertexDataBuffer.Init(pDevice, vertexData.size() * sizeof vertexData[0], false);
vertexDataHandle = m_ResourceManager->Commit(&vertexDataBuffer);
uploadBufferData(&vertexDataBuffer, vertexData.data());
indexBuffer.Init(pDevice, indices.size() * sizeof indices[0]);
uploadBufferData(&indexBuffer, indices.data());
}
// TODO: Index buffer needs to be separated.
indexBuffer = systems::CastBuffer<IndexBuffer>(
m_Device->CreateIndexBuffer(indices.size() * sizeof indices[0], "Index Buffer"));
context.UploadBuffer(indexBuffer, indices);
#pragma endregion
#if !defined(ASTER_NDEBUG)
m_CommandBuffer.endDebugUtilsLabelEXT();
context.EndDebugRegion();
#endif
AbortIfFailed(m_CommandBuffer.end());
context.End();
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &m_CommandBuffer,
};
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u32>));
pDevice->m_Device.destroy(fence, nullptr);
AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {}));
for (auto &buffer : stagingBuffers)
{
buffer.Destroy(pDevice);
}
auto rcpt = m_Device->Submit(context);
m_Device->WaitOn(rcpt);
Model::ModelHandles handles = {
.m_VertexPositionHandle = positionBufferHandle,
.m_VertexDataHandle = vertexDataHandle,
.m_MaterialsHandle = materialsHandle,
.m_NodeHandle = nodeHandle,
.m_VertexPositionHandle = positionBuffer,
.m_VertexDataHandle = vertexDataBuffer,
.m_MaterialsHandle = materialsBuffer,
.m_NodeHandle = nodeBuffer,
};
Model::ModelHandlesData handlesData = handles;
auto handlesBuffer = m_Device->CreateStorageBuffer(sizeof handlesData, "Materials");
handlesBuffer->Write(0, sizeof handlesData, &handlesData);
eastl::vector<TextureHandle> textureHandles;
eastl::vector<systems::ResId<TextureView>> textureHandles;
textureHandles.reserve(textureHandleMap.size());
for (auto &[key, val] : textureHandleMap)
@@ -940,139 +848,45 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
return Model{
m_ResourceManager, std::move(textureHandles), std::move(nodes), handles, indexBuffer, meshPrimitives,
textureHandles, std::move(nodes), nodeBuffer, handles, handlesBuffer, indexBuffer, meshPrimitives,
};
}
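End to end, a caller now constructs the loader from the rendering device, loads once, and calls Update() per frame; Update re-uploads node transforms only when the hierarchy actually changed. A sketch (asset path and timing variable are placeholders):
AssetLoader loader{renderingDevice};
Model model = loader.LoadModelToGpu("assets/helmet.gltf", "Helmet");
// per frame:
model.SetModelTransform(rotate(model.GetModelTransform(), deltaTime, vec3{0.0f, 1.0f, 0.0f}));
model.Update(); // writes m_NodeBuffer only if Nodes::Update() reports a change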
Model::Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles, Nodes &&nodes,
const ModelHandles &handles, const IndexBuffer &indexBuffer,
const eastl::vector<MeshPrimitive> &meshPrimitives)
: m_ResourceManager(resourceManager)
, m_TextureHandles(std::move(textureHandles))
Model::Model(eastl::vector<systems::ResId<TextureView>> &textureHandles, Nodes &&nodes, Ref<Buffer> nodeBuffer,
ModelHandles &handles, Ref<Buffer> modelHandlesBuffer, Ref<IndexBuffer> indexBuffer,
eastl::vector<MeshPrimitive> const &meshPrimitives)
: m_TextureHandles(std::move(textureHandles))
, m_Nodes(std::move(nodes))
, m_Handles(handles)
, m_IndexBuffer(indexBuffer)
, m_Handles(std::move(handles))
, m_NodeBuffer(std::move(nodeBuffer))
, m_IndexBuffer(std::move(indexBuffer))
, m_ModelHandlesBuffer(std::move(modelHandlesBuffer))
, m_MeshPrimitives(meshPrimitives)
{
}
Model::Model(Model &&other) noexcept
: m_ResourceManager(Take(other.m_ResourceManager))
, m_TextureHandles(std::move(other.m_TextureHandles))
, m_Handles(other.m_Handles)
, m_IndexBuffer(other.m_IndexBuffer)
, m_MeshPrimitives(std::move(other.m_MeshPrimitives))
{
}
Model &
Model::operator=(Model &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = Take(other.m_ResourceManager);
m_TextureHandles = std::move(other.m_TextureHandles);
m_Handles = other.m_Handles;
m_IndexBuffer = other.m_IndexBuffer;
m_MeshPrimitives = std::move(other.m_MeshPrimitives);
return *this;
}
const mat4 &
mat4 const &
Model::GetModelTransform() const
{
return m_Nodes[0];
}
void
Model::SetModelTransform(const mat4 &transform)
Model::SetModelTransform(mat4 const &transform)
{
m_Nodes.Set(0, transform);
}
Model::~Model()
{
if (!m_ResourceManager)
return;
m_IndexBuffer.Destroy(m_ResourceManager->m_Device);
m_ResourceManager->Release(m_Handles.m_VertexDataHandle);
m_ResourceManager->Release(m_Handles.m_NodeHandle);
m_ResourceManager->Release(m_Handles.m_VertexPositionHandle);
m_ResourceManager->Release(m_Handles.m_MaterialsHandle);
for (const TextureHandle &handle : m_TextureHandles)
{
m_ResourceManager->Release(handle);
}
}
void
Model::Update()
{
if (m_Nodes.Update())
{
m_ResourceManager->Write(m_Handles.m_NodeHandle, 0, m_Nodes.GetGlobalTransformByteSize(),
m_Nodes.GetGlobalTransformPtr());
m_NodeBuffer->Write(0, m_Nodes.GetGlobalTransformByteSize(), m_Nodes.GetGlobalTransformPtr());
}
}
AssetLoader::AssetLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
u32 graphicsQueueIndex)
: m_ResourceManager(resourceManager)
, m_TransferQueue(transferQueue)
, m_TransferQueueIndex(transferQueueIndex)
, m_GraphicsQueueIndex(graphicsQueueIndex)
{
const Device *pDevice = resourceManager->m_Device;
const vk::CommandPoolCreateInfo poolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = transferQueueIndex,
};
AbortIfFailedM(pDevice->m_Device.createCommandPool(&poolCreateInfo, nullptr, &m_CommandPool),
"Transfer command pool creation failed.");
pDevice->SetName(m_CommandPool, "Asset Loader Command Pool");
const vk::CommandBufferAllocateInfo commandBufferAllocateInfo = {
.commandPool = m_CommandPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailed(pDevice->m_Device.allocateCommandBuffers(&commandBufferAllocateInfo, &m_CommandBuffer));
pDevice->SetName(m_CommandBuffer, "Asset Loader Command Buffer");
}
AssetLoader::~AssetLoader()
{
if (m_ResourceManager)
{
m_ResourceManager->m_Device->m_Device.destroy(m_CommandPool, nullptr);
}
}
AssetLoader::AssetLoader(AssetLoader &&other) noexcept
: m_ResourceManager(Take(other.m_ResourceManager))
, m_CommandPool(other.m_CommandPool)
, m_CommandBuffer(other.m_CommandBuffer)
, m_TransferQueue(other.m_TransferQueue)
, m_TransferQueueIndex(other.m_TransferQueueIndex)
, m_GraphicsQueueIndex(other.m_GraphicsQueueIndex)
AssetLoader::AssetLoader(systems::RenderingDevice &device)
: m_Device{&device}
{
}
AssetLoader &
AssetLoader::operator=(AssetLoader &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = Take(other.m_ResourceManager);
m_CommandPool = other.m_CommandPool;
m_CommandBuffer = other.m_CommandBuffer;
m_TransferQueue = other.m_TransferQueue;
m_TransferQueueIndex = other.m_TransferQueueIndex;
m_GraphicsQueueIndex = other.m_GraphicsQueueIndex;
return *this;
}


@@ -1,6 +1,6 @@
// =============================================
// Aster: asset_loader.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@@ -9,8 +9,24 @@
#include "aster/core/buffer.h"
#include "gpu_resource_manager.h"
#include "aster/systems/resource.h"
#include "nodes.h"
#include "tiny_gltf.h"
namespace systems
{
class TransferContext;
}
namespace systems
{
class RenderingDevice;
class ResourceManager;
class SamplerManager;
class BufferManager;
class ImageManager;
class CommitManager;
} // namespace systems
namespace tinygltf
{
@@ -18,7 +34,6 @@ struct Image;
}
struct Image;
struct TextureHandle;
struct Texture;
constexpr auto GLTF_ASCII_FILE_EXTENSION = ".gltf";
@@ -35,15 +50,15 @@ struct MeshPrimitive
struct Material
{
vec4 m_AlbedoFactor; // 16 16
vec3 m_EmissionFactor; // 12 28
f32 m_MetalFactor; // 04 32
f32 m_RoughFactor; // 04 36
TextureHandle m_AlbedoTex; // 04 40
TextureHandle m_NormalTex; // 04 44
TextureHandle m_MetalRoughTex; // 04 48
TextureHandle m_OcclusionTex; // 04 52
TextureHandle m_EmissionTex; // 04 56
vec4 m_AlbedoFactor; // 16 16
vec4 m_EmissionFactor; // 16 32
systems::ResId<TextureView> m_AlbedoTex; // 08 40
systems::ResId<TextureView> m_NormalTex; // 08 48
systems::ResId<TextureView> m_MetalRoughTex; // 08 56
systems::ResId<TextureView> m_OcclusionTex; // 08 64
systems::ResId<TextureView> m_EmissionTex; // 08 72
f32 m_MetalFactor; // 04 76
f32 m_RoughFactor; // 04 80
};
struct VertexData
@@ -56,49 +71,61 @@ struct VertexData
struct Model
{
GpuResourceManager *m_ResourceManager;
eastl::vector<TextureHandle> m_TextureHandles;
eastl::vector<systems::ResId<TextureView>> m_TextureHandles;
Nodes m_Nodes;
struct ModelHandlesData
{
uptr m_VertexPositionHandle;
uptr m_VertexDataHandle;
uptr m_MaterialsHandle;
uptr m_NodeHandle;
};
struct ModelHandles
{
BufferHandle m_VertexPositionHandle;
BufferHandle m_VertexDataHandle;
BufferHandle m_MaterialsHandle;
BufferHandle m_NodeHandle;
Ref<Buffer> m_VertexPositionHandle;
Ref<Buffer> m_VertexDataHandle;
Ref<Buffer> m_MaterialsHandle;
Ref<Buffer> m_NodeHandle;
operator ModelHandlesData() const
{
return {
.m_VertexPositionHandle = m_VertexPositionHandle->GetDeviceAddress(),
.m_VertexDataHandle = m_VertexDataHandle->GetDeviceAddress(),
.m_MaterialsHandle = m_MaterialsHandle->GetDeviceAddress(),
.m_NodeHandle = m_NodeHandle->GetDeviceAddress(),
};
}
} m_Handles;
IndexBuffer m_IndexBuffer;
Ref<Buffer> m_NodeBuffer;
Ref<IndexBuffer> m_IndexBuffer;
Ref<Buffer> m_ModelHandlesBuffer;
eastl::vector<MeshPrimitive> m_MeshPrimitives;
[[nodiscard]] const mat4 &GetModelTransform() const;
void SetModelTransform(const mat4 &transform);
[[nodiscard]] mat4 const &GetModelTransform() const;
void SetModelTransform(mat4 const &transform);
void Update();
Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles, Nodes &&nodes,
const ModelHandles &handles, const IndexBuffer &indexBuffer,
const eastl::vector<MeshPrimitive> &meshPrimitives);
~Model();
Model(eastl::vector<systems::ResId<TextureView>> &textureHandles, Nodes &&nodes, Ref<Buffer> nodeBuffer,
ModelHandles &handles, Ref<Buffer> modelHandlesBuffer, Ref<IndexBuffer> indexBuffer,
eastl::vector<MeshPrimitive> const &meshPrimitives);
~Model() = default;
Model(Model &&other) noexcept;
Model &operator=(Model &&other) noexcept;
Model(Model &&other) noexcept = default;
Model &operator=(Model &&other) noexcept = default;
Model(const Model &) = delete;
const Model &operator=(const Model &) = delete;
Model(Model const &) = delete;
Model const &operator=(Model const &) = delete;
};
struct AssetLoader
{
GpuResourceManager *m_ResourceManager;
vk::CommandPool m_CommandPool;
vk::CommandBuffer m_CommandBuffer;
vk::Queue m_TransferQueue;
u32 m_TransferQueueIndex;
u32 m_GraphicsQueueIndex;
systems::RenderingDevice *m_Device;
void LoadHdrImage(Texture *texture, cstr path, cstr name = nullptr) const;
TextureHandle LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image, bool isSrgb) const;
Ref<TextureView> LoadHdrImage(cstr path, cstr name = nullptr) const;
Model LoadModelToGpu(cstr path, cstr name = nullptr);
constexpr static auto ANormal = "NORMAL";
@@ -110,17 +137,32 @@ struct AssetLoader
constexpr static auto AJoints0 = "JOINTS_0";
constexpr static auto AWeights0 = "WEIGHTS_0";
AssetLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
u32 graphicsQueueIndex);
~AssetLoader();
explicit AssetLoader(systems::RenderingDevice &device);
AssetLoader(AssetLoader &&other) noexcept;
AssetLoader &operator=(AssetLoader &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(AssetLoader);
private:
systems::ResId<TextureView>
LoadImageToGpu(systems::TransferContext &context, tinygltf::Image *image, bool isSrgb, cstr name = nullptr) const;
};
void GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayout initialLayout,
vk::ImageLayout finalLayout,
vk::PipelineStageFlags2 prevStage = vk::PipelineStageFlagBits2::eAllCommands,
vk::PipelineStageFlags2 finalStage = vk::PipelineStageFlagBits2::eAllCommands);
void
GenerateMipMaps(systems::TransferContext &context, Ref<Texture> const &textureView, vk::ImageLayout initialLayout,
vk::ImageLayout finalLayout, vk::PipelineStageFlags2 prevStage, vk::PipelineStageFlags2 finalStage);
void
GenerateMipMaps(systems::TransferContext &context, concepts::ImageRefTo<Texture> auto &texture,
vk::ImageLayout initialLayout, vk::ImageLayout finalLayout,
vk::PipelineStageFlags2 prevStage = vk::PipelineStageFlagBits2::eAllCommands,
vk::PipelineStageFlags2 finalStage = vk::PipelineStageFlagBits2::eAllCommands)
{
GenerateMipMaps(context, systems::CastImage<Texture>(texture), initialLayout, finalLayout, prevStage, finalStage);
}
void
GenerateMipMaps(systems::TransferContext &context, concepts::ViewRefTo<Texture> auto &texture,
vk::ImageLayout initialLayout, vk::ImageLayout finalLayout,
vk::PipelineStageFlags2 prevStage = vk::PipelineStageFlagBits2::eAllCommands,
vk::PipelineStageFlags2 finalStage = vk::PipelineStageFlagBits2::eAllCommands)
{
GenerateMipMaps(context, systems::CastImage<Texture>(texture->m_Image), initialLayout, finalLayout, prevStage,
finalStage);
}


@@ -1,688 +0,0 @@
// =============================================
// Aster: gpu_resource_manager.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "gpu_resource_manager.h"
#include "helpers.h"
#include "aster/core/buffer.h"
#include "aster/core/device.h"
#include "aster/core/image.h"
#include <EASTL/array.h>
void
TextureManager::Init(const u32 maxCapacity)
{
m_MaxCapacity = maxCapacity;
m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}
TextureHandle
TextureManager::Commit(Texture *texture)
{
ERROR_IF(!texture || !texture->IsValid(), "Texture must be valid for committal")
THEN_ABORT(-1);
if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
{
const u32 index = m_FreeHead;
Texture *allocatedTexture = &m_Textures[index];
assert(!allocatedTexture->IsValid());
m_FreeHead = *Recast<u32 *>(allocatedTexture);
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<Texture>);
*allocatedTexture = *texture;
// Take ownership of the texture.
texture->m_Flags_ &= ~Texture::OWNED_BIT;
return {index};
}
const u32 index = Cast<u32>(m_Textures.size());
if (index < m_MaxCapacity)
{
Texture *allocatedTexture = &m_Textures.push_back();
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<Texture>);
*allocatedTexture = *texture;
texture->m_Flags_ &= ~Texture::OWNED_BIT;
return {index};
}
ERROR("Out of Buffers") THEN_ABORT(-1);
}
Texture *
TextureManager::Fetch(const TextureHandle handle)
{
assert(!handle.IsInvalid());
return &m_Textures[handle.m_Index];
}
void
TextureManager::Release(const Device *device, const TextureHandle handle)
{
assert(!handle.IsInvalid());
Texture *allocatedTexture = &m_Textures[handle.m_Index];
allocatedTexture->Destroy(device);
assert(!allocatedTexture->IsValid());
*Recast<u32 *>(allocatedTexture) = m_FreeHead;
m_FreeHead = handle.m_Index;
}
void
TextureManager::Destroy(const Device *device)
{
for (auto &texture : m_Textures)
{
texture.Destroy(device);
}
}
void
BufferManager::Init(const u32 maxCapacity)
{
m_MaxCapacity = maxCapacity;
m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}
BufferHandle
BufferManager::Commit(StorageBuffer *buffer)
{
ERROR_IF(!buffer || !buffer->IsValid() || !buffer->IsOwned(), "Buffer must be valid and owned for commital")
THEN_ABORT(-1);
if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
{
const u32 index = m_FreeHead;
StorageBuffer *allocatedBuffer = &m_Buffers[index];
assert(!allocatedBuffer->IsValid());
m_FreeHead = *Recast<u32 *>(allocatedBuffer);
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<StorageBuffer>);
*allocatedBuffer = *buffer;
// Take ownership of the buffer.
buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;
return {index};
}
const u32 index = Cast<u32>(m_Buffers.size());
if (index < m_MaxCapacity)
{
StorageBuffer *allocatedBuffer = &m_Buffers.push_back();
// Ensure it is copyable.
static_assert(std::is_trivially_copyable_v<StorageBuffer>);
*allocatedBuffer = *buffer;
buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;
return {index};
}
ERROR("Out of Buffers") THEN_ABORT(-1);
}
StorageBuffer *
BufferManager::Fetch(const BufferHandle handle)
{
assert(!handle.IsInvalid());
return &m_Buffers[handle.m_Index];
}
void
BufferManager::Release(const Device *device, const BufferHandle handle)
{
assert(!handle.IsInvalid());
StorageBuffer *allocatedBuffer = &m_Buffers[handle.m_Index];
allocatedBuffer->Destroy(device);
assert(!allocatedBuffer->IsValid());
*Recast<u32 *>(allocatedBuffer) = m_FreeHead;
m_FreeHead = handle.m_Index;
}
void
BufferManager::Destroy(const Device *device)
{
for (auto &buffer : m_Buffers)
{
buffer.Destroy(device);
}
}
StorageTextureHandle
StorageTextureManager::Commit(StorageTexture *texture)
{
const TextureHandle tx = TextureManager::Commit(texture);
return {tx.m_Index};
}
StorageTexture *
StorageTextureManager::Fetch(const StorageTextureHandle handle)
{
assert(!handle.IsInvalid());
return Recast<StorageTexture *>(&m_Textures[handle.m_Index]);
}
void
StorageTextureManager::Release(const Device *device, const StorageTextureHandle handle)
{
TextureManager::Release(device, {handle.m_Index});
}
usize
HashSamplerCreateInfo(const vk::SamplerCreateInfo *createInfo)
{
usize hash = HashAny(createInfo->flags);
hash = HashCombine(hash, HashAny(createInfo->magFilter));
hash = HashCombine(hash, HashAny(createInfo->minFilter));
hash = HashCombine(hash, HashAny(createInfo->mipmapMode));
hash = HashCombine(hash, HashAny(createInfo->addressModeU));
hash = HashCombine(hash, HashAny(createInfo->addressModeV));
hash = HashCombine(hash, HashAny(createInfo->addressModeW));
hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->mipLodBias * 1000))); // Resolution of 10^-3
hash = HashCombine(hash, HashAny(createInfo->anisotropyEnable));
hash = HashCombine(hash,
HashAny(Cast<usize>(createInfo->maxAnisotropy * 0x10))); // 16:1 Anisotropy is enough resolution
hash = HashCombine(hash, HashAny(createInfo->compareEnable));
hash = HashCombine(hash, HashAny(createInfo->compareOp));
hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->minLod * 1000))); // 0.001 resolution is enough.
hash = HashCombine(hash,
HashAny(Cast<usize>(createInfo->maxLod * 1000))); // 0.001 resolution is enough. (1 == NO Clamp)
hash = HashCombine(hash, HashAny(createInfo->borderColor));
hash = HashCombine(hash, HashAny(createInfo->unnormalizedCoordinates));
return hash;
}
void
SamplerManager::Init(usize size)
{
m_Samplers.reserve(size);
m_SamplerHashes.reserve(size);
}
SamplerHandle
SamplerManager::Create(const Device *device, const vk::SamplerCreateInfo *createInfo)
{
const usize hash = HashSamplerCreateInfo(createInfo);
for (u32 index = 0; usize samplerHash : m_SamplerHashes)
{
if (samplerHash == hash)
{
return {index};
}
++index;
}
vk::Sampler sampler;
AbortIfFailed(device->m_Device.createSampler(createInfo, nullptr, &sampler));
const u32 index = Cast<u32>(m_SamplerHashes.size());
m_SamplerHashes.push_back(hash);
m_Samplers.push_back(sampler);
return {index};
}
vk::Sampler
SamplerManager::Fetch(const SamplerHandle handle)
{
assert(!handle.IsInvalid());
return m_Samplers[handle.m_Index];
}
void
SamplerManager::Destroy(const Device *device)
{
for (const auto &sampler : m_Samplers)
{
device->m_Device.destroy(sampler, nullptr);
}
m_Samplers.clear();
m_SamplerHashes.clear();
}
GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info)
: uBufferInfo(info)
{
}
GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info)
: uImageInfo(info)
{
}
GpuResourceManager::WriteInfo::WriteInfo(vk::BufferView info)
: uBufferView(info)
{
}
BufferHandle
GpuResourceManager::Commit(StorageBuffer *storageBuffer)
{
const BufferHandle handle = m_BufferManager.Commit(storageBuffer);
m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
.buffer = storageBuffer->m_Buffer,
.offset = 0,
.range = storageBuffer->GetSize(),
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = BUFFER_BINDING_INDEX,
.dstArrayElement = handle.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.pBufferInfo = &m_WriteInfos.back().uBufferInfo,
});
m_WriteOwner.emplace_back(HandleType::eBuffer, handle.m_Index);
#if !defined(ASTER_NDEBUG)
++m_CommitedBufferCount;
#endif
return handle;
}
void
GpuResourceManager::Write(const BufferHandle handle, const usize offset, const usize size, const void *data)
{
m_BufferManager.Fetch(handle)->Write(m_Device, offset, size, data);
}
void
GpuResourceManager::EraseWrites(u32 handleIndex, HandleType handleType)
{
auto writeIter = m_Writes.begin();
auto ownerIter = m_WriteOwner.begin();
const auto ownerEnd = m_WriteOwner.end();
while (ownerIter != ownerEnd)
{
if (ownerIter->first == handleType && ownerIter->second == handleIndex)
{
*writeIter = m_Writes.back();
*ownerIter = m_WriteOwner.back();
m_Writes.pop_back();
m_WriteOwner.pop_back();
return;
}
++ownerIter;
++writeIter;
}
}
void
GpuResourceManager::Release(BufferHandle handle)
{
if (handle.IsInvalid())
return;
EraseWrites(handle.m_Index, HandleType::eBuffer);
m_BufferManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
--m_CommitedBufferCount;
#endif
}
void
GpuResourceManager::Release(StorageBuffer *storageBuffer, const BufferHandle handle)
{
assert(storageBuffer);
assert(!storageBuffer->IsValid());
StorageBuffer *internal = m_BufferManager.Fetch(handle);
*storageBuffer = *internal;
internal->m_Size_ &= ~StorageBuffer::OWNED_BIT;
Release(handle);
}
void
GpuResourceManager::Release(TextureHandle handle)
{
if (handle.IsInvalid())
return;
EraseWrites(handle.m_Index, HandleType::eTexture);
m_TextureManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
--m_CommitedTextureCount;
#endif
}
void
GpuResourceManager::Release(Texture *texture, TextureHandle handle)
{
assert(texture);
assert(!texture->IsValid());
Texture *internal = m_TextureManager.Fetch(handle);
*texture = *internal;
internal->m_Flags_ &= ~Texture::OWNED_BIT;
Release(handle);
}
TextureHandle
GpuResourceManager::CommitTexture(Texture *texture, const SamplerHandle sampler)
{
TextureHandle handle = m_TextureManager.Commit(texture);
const vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler);
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = samplerImpl,
.imageView = texture->m_View,
.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = TEXTURE_BINDING_INDEX,
.dstArrayElement = handle.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
m_WriteOwner.emplace_back(HandleType::eTexture, handle.m_Index);
#if !defined(ASTER_NDEBUG)
++m_CommitedTextureCount;
#endif
return {handle};
}
StorageTextureHandle
GpuResourceManager::CommitStorageTexture(StorageTexture *storageTexture, SamplerHandle sampler)
{
StorageTextureHandle handle = m_StorageTextureManager.Commit(storageTexture);
vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler);
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = samplerImpl,
.imageView = storageTexture->m_View,
.imageLayout = vk::ImageLayout::eGeneral,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = STORAGE_TEXTURE_BINDING_INDEX,
.dstArrayElement = handle.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageImage,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
m_WriteOwner.emplace_back(HandleType::eStorageTexture, handle.m_Index);
#if !defined(ASTER_NDEBUG)
++m_CommitedStorageTextureCount;
#endif
return {handle};
}
void
GpuResourceManager::Release(StorageTextureHandle handle)
{
if (handle.IsInvalid())
return;
EraseWrites(handle.m_Index, HandleType::eStorageTexture);
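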
m_StorageTextureManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
--m_CommitedStorageTextureCount;
#endif
}
void
GpuResourceManager::Release(StorageTexture *texture, const StorageTextureHandle handle)
{
assert(texture);
assert(!texture->IsValid());
StorageTexture *internal = m_StorageTextureManager.Fetch(handle);
*texture = *internal;
internal->m_Flags_ &= ~StorageTexture::OWNED_BIT;
Release(handle);
}
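// Update() flushes every queued descriptor write in a single
// updateDescriptorSets call; eUpdateAfterBind allows this even while the set
// is bound in recorded, unsubmitted command buffers.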
void
GpuResourceManager::Update()
{
if (m_Writes.empty() || m_WriteInfos.empty())
return;
m_Device->m_Device.updateDescriptorSets(Cast<u32>(m_Writes.size()), m_Writes.data(), 0, nullptr);
m_Writes.clear();
m_WriteInfos.clear();
m_WriteOwner.clear();
}
GpuResourceManager::GpuResourceManager(Device *device, u16 maxSize)
: m_Device(device)
{
vk::PhysicalDeviceProperties properties;
m_Device->m_PhysicalDevice.getProperties(&properties);
u32 buffersCount = eastl::min(properties.limits.maxPerStageDescriptorStorageBuffers - 1024, Cast<u32>(maxSize));
u32 texturesCount = eastl::min(properties.limits.maxPerStageDescriptorSampledImages - 1024, Cast<u32>(maxSize));
u32 storageTexturesCount =
eastl::min(properties.limits.maxPerStageDescriptorStorageImages - 1024, Cast<u32>(maxSize));
INFO("Max Buffer Count: {}", buffersCount);
INFO("Max Texture Count: {}", texturesCount);
INFO("Max Storage Texture Count: {}", storageTexturesCount);
m_BufferManager.Init(buffersCount);
m_TextureManager.Init(texturesCount);
m_StorageTextureManager.Init(storageTexturesCount);
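// Note: samplers reuse the storage-texture cap; no dedicated sampler limit is queried.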
m_SamplerManager.Init(storageTexturesCount);
m_DefaultSamplerCreateInfo = {
.magFilter = vk::Filter::eLinear,
.minFilter = vk::Filter::eLinear,
.mipmapMode = vk::SamplerMipmapMode::eLinear,
.addressModeU = vk::SamplerAddressMode::eRepeat,
.addressModeV = vk::SamplerAddressMode::eRepeat,
.addressModeW = vk::SamplerAddressMode::eRepeat,
.mipLodBias = 0.0f,
.anisotropyEnable = true,
.maxAnisotropy = properties.limits.maxSamplerAnisotropy,
.compareEnable = false,
.minLod = 0,
.maxLod = VK_LOD_CLAMP_NONE,
.borderColor = vk::BorderColor::eFloatOpaqueBlack,
.unnormalizedCoordinates = false,
};
m_DefaultSampler = m_SamplerManager.Fetch(m_SamplerManager.Create(device, &m_DefaultSamplerCreateInfo));
eastl::array poolSizes = {
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageBuffer,
.descriptorCount = buffersCount,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = texturesCount,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageImage,
.descriptorCount = storageTexturesCount,
},
};
const vk::DescriptorPoolCreateInfo poolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
.maxSets = 1,
.poolSizeCount = Cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));
vk::DescriptorBindingFlags bindingFlags =
vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
eastl::array layoutBindingFlags = {
bindingFlags,
bindingFlags,
bindingFlags,
};
vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
.bindingCount = Cast<u32>(layoutBindingFlags.size()),
.pBindingFlags = layoutBindingFlags.data(),
};
eastl::array descriptorLayoutBindings = {
vk::DescriptorSetLayoutBinding{
.binding = BUFFER_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.descriptorCount = Cast<u32>(buffersCount),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = TEXTURE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = Cast<u32>(texturesCount),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = STORAGE_TEXTURE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageImage,
.descriptorCount = Cast<u32>(storageTexturesCount),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
};
static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
.pNext = &bindingFlagsCreateInfo,
.flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
.bindingCount = Cast<u32>(descriptorLayoutBindings.size()),
.pBindings = descriptorLayoutBindings.data(),
};
AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));
// One descriptor set is enough: with update-after-bind it may be written at any time up to submit, and its data is held while a submission is pending.
// https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
// https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
.descriptorPool = m_DescriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &m_SetLayout,
};
AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));
m_Device->SetName(m_SetLayout, "Bindless Layout");
m_Device->SetName(m_DescriptorPool, "Bindless Pool");
m_Device->SetName(m_DescriptorSet, "Bindless Set");
}
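// Usage sketch (assumes the bindless convention above): the set is bound once
// and never reallocated; shaders index bindings 0/1/2 with the raw handle
// index, which matches dstArrayElement in the Commit* calls and is typically
// passed along in push constants.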
GpuResourceManager::~GpuResourceManager()
{
#if !defined(ASTER_NDEBUG)
WARN_IF(m_CommitedBufferCount > 0 || m_CommitedTextureCount > 0 || m_CommitedStorageTextureCount > 0,
"Resources alive: SSBO = {}, Textures = {}, RWTexture = {}", m_CommitedBufferCount, m_CommitedTextureCount,
m_CommitedStorageTextureCount);
#endif
m_BufferManager.Destroy(m_Device);
m_TextureManager.Destroy(m_Device);
m_StorageTextureManager.Destroy(m_Device);
m_SamplerManager.Destroy(m_Device);
m_Device->m_Device.destroy(m_DescriptorPool, nullptr);
m_Device->m_Device.destroy(m_SetLayout, nullptr);
}
GpuResourceManager::GpuResourceManager(GpuResourceManager &&other) noexcept
: m_WriteInfos(std::move(other.m_WriteInfos))
, m_Writes(std::move(other.m_Writes))
, m_WriteOwner(std::move(other.m_WriteOwner))
, m_BufferManager(std::move(other.m_BufferManager))
, m_TextureManager(std::move(other.m_TextureManager))
, m_StorageTextureManager(std::move(other.m_StorageTextureManager))
, m_SamplerManager(std::move(other.m_SamplerManager))
, m_Device(Take(other.m_Device))
, m_DescriptorPool(other.m_DescriptorPool)
, m_SetLayout(other.m_SetLayout)
, m_DescriptorSet(other.m_DescriptorSet)
#if !defined(ASTER_NDEBUG)
, m_CommitedBufferCount(other.m_CommitedBufferCount)
, m_CommitedTextureCount(other.m_CommitedTextureCount)
, m_CommitedStorageTextureCount(other.m_CommitedStorageTextureCount)
#endif
{
assert(!other.m_Device);
}
GpuResourceManager &
GpuResourceManager::operator=(GpuResourceManager &&other) noexcept
{
if (this == &other)
return *this;
m_WriteInfos = std::move(other.m_WriteInfos);
m_Writes = std::move(other.m_Writes);
m_WriteOwner = std::move(other.m_WriteOwner);
m_BufferManager = std::move(other.m_BufferManager);
m_TextureManager = std::move(other.m_TextureManager);
m_StorageTextureManager = std::move(other.m_StorageTextureManager);
m_SamplerManager = std::move(other.m_SamplerManager);
m_Device = Take(other.m_Device); // Ensure taken.
m_DescriptorPool = other.m_DescriptorPool;
m_SetLayout = other.m_SetLayout;
m_DescriptorSet = other.m_DescriptorSet;
#if !defined(ASTER_NDEBUG)
m_CommitedBufferCount = other.m_CommitedBufferCount;
m_CommitedTextureCount = other.m_CommitedTextureCount;
m_CommitedStorageTextureCount = other.m_CommitedStorageTextureCount;
#endif
assert(!other.m_Device);
return *this;
}
SamplerHandle
GpuResourceManager::CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo)
{
return m_SamplerManager.Create(m_Device, samplerCreateInfo);
}
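// Lifecycle sketch (hypothetical names; assumes a Device *device and size
// bytes at data):
//   StorageBuffer buffer;
//   buffer.Init(device, size, /*hostVisible*/ true, "Example");
//   BufferHandle h = manager.Commit(&buffer);  // manager takes ownership
//   manager.Write(h, 0, size, data);
//   manager.Update();                          // flush queued descriptor writes
//   // ... record work that reads binding 0 at index h.m_Index ...
//   manager.Release(h);                        // destroys the GPU buffer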

@ -1,175 +0,0 @@
// =============================================
// Aster: gpu_resource_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include <EASTL/deque.h>
#include <EASTL/vector_map.h>
struct Device;
struct Texture;
struct StorageTexture;
struct StorageBuffer;
struct GpuResourceHandle
{
constexpr static u32 INVALID_HANDLE = MaxValue<u32>;
u32 m_Index = INVALID_HANDLE; // Default = invalid
[[nodiscard]] bool
IsInvalid() const
{
return m_Index == INVALID_HANDLE;
}
};
struct BufferHandle : GpuResourceHandle
{
};
struct TextureHandle : GpuResourceHandle
{
};
struct StorageTextureHandle : GpuResourceHandle
{
};
struct SamplerHandle : GpuResourceHandle
{
};
struct TextureManager
{
eastl::vector<Texture> m_Textures;
u32 m_MaxCapacity;
u32 m_FreeHead;
void Init(u32 maxCapacity);
TextureHandle Commit(Texture *texture);
Texture *Fetch(TextureHandle handle);
void Release(const Device *device, TextureHandle handle);
void Destroy(const Device *device);
};
struct BufferManager
{
eastl::vector<StorageBuffer> m_Buffers;
u32 m_MaxCapacity;
u32 m_FreeHead;
void Init(u32 maxCapacity);
BufferHandle Commit(StorageBuffer *buffer);
StorageBuffer *Fetch(BufferHandle handle);
void Release(const Device *device, BufferHandle handle);
void Destroy(const Device *device);
};
struct StorageTextureManager : TextureManager
{
StorageTextureHandle Commit(StorageTexture *texture);
StorageTexture *Fetch(StorageTextureHandle handle);
void Release(const Device *device, StorageTextureHandle handle);
};
struct SamplerManager
{
// There can only be so many samplers.
eastl::vector<vk::Sampler> m_Samplers;
eastl::vector<usize> m_SamplerHashes;
void Init(usize size);
SamplerHandle Create(const Device *device, const vk::SamplerCreateInfo *createInfo);
vk::Sampler Fetch(SamplerHandle handle);
void Destroy(const Device *device);
};
struct GpuResourceManager
{
private:
union WriteInfo {
vk::DescriptorBufferInfo uBufferInfo;
vk::DescriptorImageInfo uImageInfo;
vk::BufferView uBufferView;
WriteInfo()
{
}
explicit WriteInfo(vk::DescriptorBufferInfo info);
explicit WriteInfo(vk::DescriptorImageInfo info);
explicit WriteInfo(vk::BufferView info);
};
enum class HandleType
{
eBuffer,
eTexture,
eStorageTexture,
};
using WriteOwner = eastl::pair<HandleType, u32>;
eastl::deque<WriteInfo> m_WriteInfos;
eastl::vector<vk::WriteDescriptorSet> m_Writes;
eastl::vector<WriteOwner> m_WriteOwner;
vk::Sampler m_DefaultSampler;
BufferManager m_BufferManager;
TextureManager m_TextureManager;
StorageTextureManager m_StorageTextureManager;
SamplerManager m_SamplerManager;
void EraseWrites(u32 handleIndex, HandleType handleType);
public:
Device *m_Device;
constexpr static u32 BUFFER_BINDING_INDEX = 0;
constexpr static u32 TEXTURE_BINDING_INDEX = 1;
constexpr static u32 STORAGE_TEXTURE_BINDING_INDEX = 2;
vk::SamplerCreateInfo m_DefaultSamplerCreateInfo;
vk::DescriptorPool m_DescriptorPool;
vk::DescriptorSetLayout m_SetLayout;
vk::DescriptorSet m_DescriptorSet;
BufferHandle Commit(StorageBuffer *storageBuffer); // Commit to GPU and take Ownership
void Write(BufferHandle handle, usize offset, usize size, const void *data); // Write to buffer
void Release(BufferHandle handle); // Release and Destroy
void Release(StorageBuffer *storageBuffer, BufferHandle handle); // Release and Return
TextureHandle CommitTexture(Texture *texture, SamplerHandle sampler = {}); // Commit to GPU and take Ownership
void Release(TextureHandle handle); // Release and Destroy
void Release(Texture *texture, TextureHandle handle); // Release and Return
StorageTextureHandle
CommitStorageTexture(StorageTexture *storageTexture, SamplerHandle sampler = {}); // Commit to GPU and take Ownership
void Release(StorageTextureHandle handle); // Release and Destroy
void Release(StorageTexture *texture, StorageTextureHandle handle); // Release and Return
SamplerHandle CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo);
void Update(); // Update all the descriptors required.
// Ctor/Dtor
GpuResourceManager(Device *device, u16 maxSize);
~GpuResourceManager();
GpuResourceManager(GpuResourceManager &&other) noexcept;
GpuResourceManager &operator=(GpuResourceManager &&other) noexcept;
#if !defined(ASTER_NDEBUG)
usize m_CommitedBufferCount = 0;
usize m_CommitedTextureCount = 0;
usize m_CommitedStorageTextureCount = 0;
#endif
DISALLOW_COPY_AND_ASSIGN(GpuResourceManager);
};

@ -1,6 +1,6 @@
// =============================================
// Aster: ibl_helpers.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "ibl_helpers.h"
@ -9,82 +9,91 @@
#include "aster/core/image.h"
#include "asset_loader.h"
#include "gpu_resource_manager.h"
#include "helpers.h"
#include "pipeline_utils.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
#include <EASTL/fixed_vector.h>
#include <EASTL/tuple.h>
constexpr cstr EQUIRECT_TO_CUBE_SHADER_FILE = "shader/eqrect_to_cube.cs.hlsl.spv";
constexpr cstr DIFFUSE_IRRADIANCE_SHADER_FILE = "shader/diffuse_irradiance.cs.hlsl.spv";
constexpr cstr PREFILTER_SHADER_FILE = "shader/prefilter.cs.hlsl.spv";
constexpr cstr BRDF_LUT_SHADER_FILE = "shader/brdf_lut.cs.hlsl.spv";
void
Environment::Destroy(GpuResourceManager *resourceManager)
{
resourceManager->Release(Take(m_Skybox));
resourceManager->Release(Take(m_Diffuse));
resourceManager->Release(Take(m_Prefilter));
resourceManager->Release(Take(m_BrdfLut));
}
constexpr auto EQUIRECT_TO_CUBE_SHADER_FILE = "eqrect_to_cube";
constexpr auto ENVIRONMENT_SHADER_FILE = "environment";
constexpr auto DIFFUSE_IRRADIANCE_ENTRY = "diffuseIrradiance";
constexpr auto PREFILTER_ENTRY = "prefilter";
constexpr auto BRDF_LUT_ENTRY = "brdfLut";
Environment
CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32 cubeSide, TextureHandle hdrEnv,
const cstr name)
CreateCubeFromHdrEnv(AssetLoader &assetLoader, u32 const cubeSide, systems::ResId<TextureView> hdrEnv)
{
GpuResourceManager *resMan = assetLoader->m_ResourceManager;
const Device *pDevice = resMan->m_Device;
systems::RenderingDevice &device = *assetLoader.m_Device;
auto *commitManager = device.m_CommitManager.get();
vk::SamplerCreateInfo brdfLutSamplerCreateInfo = resMan->m_DefaultSamplerCreateInfo;
brdfLutSamplerCreateInfo.addressModeU = vk::SamplerAddressMode::eClampToEdge;
brdfLutSamplerCreateInfo.addressModeV = vk::SamplerAddressMode::eClampToEdge;
brdfLutSamplerCreateInfo.addressModeW = vk::SamplerAddressMode::eClampToEdge;
auto skybox = device.CreateTextureCubeWithView<StorageTextureCubeView>({
.m_Format = vk::Format::eR16G16B16A16Sfloat,
.m_Side = cubeSide,
.m_Name = "Skybox",
.m_IsSampled = true,
.m_IsMipMapped = true,
.m_IsStorage = true,
});
StorageTextureCube skybox;
StorageTextureCube diffuseIrradiance;
StorageTextureCube prefilterCube;
StorageTexture brdfLut;
SamplerHandle brdfLutSampler;
auto skyboxHandle = commitManager->CommitTexture(skybox);
auto skyboxStorageHandle = commitManager->CommitStorageImage(skybox);
skybox.Init(pDevice, cubeSide, vk::Format::eR16G16B16A16Sfloat, true, true, "Skybox");
TextureHandle skyboxHandle = resMan->CommitTexture(&skybox);
StorageTextureHandle skyboxStorageHandle = resMan->CommitStorageTexture(&skybox);
auto diffuseIrradiance = device.CreateTextureCubeWithView<StorageTextureCubeView>({
.m_Format = vk::Format::eR16G16B16A16Sfloat,
.m_Side = 64,
.m_Name = "Diffuse Irradiance",
.m_IsSampled = true,
.m_IsMipMapped = false,
.m_IsStorage = true,
});
auto diffuseIrradianceHandle = commitManager->CommitTexture(diffuseIrradiance);
auto diffuseIrradianceStorageHandle = commitManager->CommitStorageImage(diffuseIrradiance);
diffuseIrradiance.Init(pDevice, 64, vk::Format::eR16G16B16A16Sfloat, true, false, "Diffuse Irradiance");
TextureHandle diffuseIrradianceHandle = resMan->CommitTexture(&diffuseIrradiance);
StorageTextureHandle diffuseIrradianceStorageHandle = resMan->CommitStorageTexture(&diffuseIrradiance);
prefilterCube.Init(pDevice, cubeSide, vk::Format::eR16G16B16A16Sfloat, true, true, "Prefilter");
TextureHandle prefilterHandle = resMan->CommitTexture(&prefilterCube); // This stores the original view for us.
auto prefilterCube = device.CreateTextureCubeWithView<StorageTextureCubeView>({
.m_Format = vk::Format::eR16G16B16A16Sfloat,
.m_Side = cubeSide,
.m_Name = "Prefilter",
.m_IsSampled = true,
.m_IsMipMapped = true,
.m_IsStorage = true,
});
auto prefilterHandle = commitManager->CommitTexture(prefilterCube); // This stores the original view for us.
constexpr u32 prefilterMipCountMax = 6;
eastl::array<StorageTextureHandle, prefilterMipCountMax> prefilterStorageHandles;
eastl::fixed_vector<systems::ResId<StorageImageView>, prefilterMipCountMax> prefilterStorageHandles;
// All non-owning copies.
for (u32 mipLevel = 0; auto &tex : prefilterStorageHandles)
for (u8 mipLevel = 0; mipLevel < prefilterMipCountMax; ++mipLevel)
{
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = prefilterCube.m_Image,
.viewType = vk::ImageViewType::eCube,
.format = vk::Format::eR16G16B16A16Sfloat,
.components = vk::ComponentMapping{},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = mipLevel++,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 6,
},
};
AbortIfFailed(pDevice->m_Device.createImageView(&imageViewCreateInfo, nullptr, &prefilterCube.m_View));
tex = resMan->CommitStorageTexture(&prefilterCube);
auto view = device.CreateView<StorageTextureCubeView>({
.m_Image = systems::CastImage<StorageTextureCube>(prefilterCube->m_Image),
.m_ViewType = vk::ImageViewType::eCube,
.m_AspectMask = vk::ImageAspectFlagBits::eColor,
.m_MipLevelCount = 1,
.m_LayerCount = 6,
.m_BaseMipLevel = mipLevel,
.m_BaseLayer = 0,
});
prefilterStorageHandles.push_back(commitManager->CommitStorageImage(view));
}
brdfLut.Init(pDevice, {512, 512}, vk::Format::eR16G16Sfloat, true, "BRDF LUT");
brdfLutSampler = resMan->CreateSampler(&brdfLutSamplerCreateInfo);
TextureHandle brdfLutHandle = resMan->CommitTexture(&brdfLut, brdfLutSampler);
StorageTextureHandle brdfLutStorageHandle = resMan->CommitStorageTexture(&brdfLut);
auto brdfLut = device.CreateTexture2DWithView<StorageTextureView>({
.m_Format = vk::Format::eR16G16Sfloat,
.m_Extent = {512, 512},
.m_Name = "BRDF LUT",
.m_IsSampled = true,
.m_IsMipMapped = false,
.m_IsStorage = true,
});
auto brdfLutSampler = device.CreateSampler({
.m_AddressModeU = vk::SamplerAddressMode::eClampToEdge,
.m_AddressModeV = vk::SamplerAddressMode::eClampToEdge,
.m_AddressModeW = vk::SamplerAddressMode::eClampToEdge,
});
auto brdfLutHandle = commitManager->CommitTexture(brdfLut, brdfLutSampler);
auto brdfLutStorageHandle = commitManager->CommitStorageImage(brdfLut);
#pragma region Dependencies and Copies
vk::ImageSubresourceRange cubeSubresRange = {
@ -114,14 +123,14 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32
.subresourceRange = cubeSubresRange,
};
eastl::fixed_vector<vk::ImageMemoryBarrier2, 4> readyToWriteBarriers(4, readyToWriteBarrierTemplate);
readyToWriteBarriers[0].image = skybox.m_Image;
readyToWriteBarriers[1].image = diffuseIrradiance.m_Image;
readyToWriteBarriers[2].image = prefilterCube.m_Image;
readyToWriteBarriers[3].image = brdfLut.m_Image;
readyToWriteBarriers[0].image = skybox->GetImage();
readyToWriteBarriers[1].image = diffuseIrradiance->GetImage();
readyToWriteBarriers[2].image = prefilterCube->GetImage();
readyToWriteBarriers[3].image = brdfLut->GetImage();
readyToWriteBarriers[3].subresourceRange = lutSubresRange;
vk::DependencyInfo readyToWriteDependency = {
.imageMemoryBarrierCount = Cast<u32>(readyToWriteBarriers.size()),
.imageMemoryBarrierCount = static_cast<u32>(readyToWriteBarriers.size()),
.pImageMemoryBarriers = readyToWriteBarriers.data(),
};
@ -136,16 +145,16 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32
.subresourceRange = cubeSubresRange,
};
auto skyboxToSampleBarrier = readyToSampleBarrierTemplate;
skyboxToSampleBarrier.image = skybox.m_Image;
skyboxToSampleBarrier.image = skybox->GetImage();
auto diffIrrToSampleBarrier = readyToSampleBarrierTemplate;
diffIrrToSampleBarrier.image = diffuseIrradiance.m_Image;
diffIrrToSampleBarrier.image = diffuseIrradiance->GetImage();
auto prefilterToSampleBarrier = readyToSampleBarrierTemplate;
prefilterToSampleBarrier.image = prefilterCube.m_Image;
prefilterToSampleBarrier.image = prefilterCube->GetImage();
auto brdfToSampleBarrier = readyToSampleBarrierTemplate;
prefilterToSampleBarrier.image = brdfLut.m_Image;
prefilterToSampleBarrier.image = brdfLut->GetImage();
prefilterToSampleBarrier.subresourceRange = lutSubresRange;
vk::DependencyInfo skyboxToSampleDependency = {
@ -169,103 +178,144 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32
struct SkyboxPushConstants
{
TextureHandle m_HdrEnvHandle;
StorageTextureHandle m_OutputTexture;
systems::ResId<TextureView> m_HdrEnvHandle;
systems::ResId<StorageImageView> m_OutputTexture;
u32 m_CubeSide;
};
struct DiffuseIrradiancePushConstants
{
TextureHandle m_SkyboxHandle;
StorageTextureHandle m_OutputTexture;
systems::ResId<TextureView> m_SkyboxHandle;
systems::ResId<StorageImageView> m_OutputTexture;
u32 m_CubeSide;
};
struct PrefilterPushConstants
{
TextureHandle m_SkyboxHandle;
StorageTextureHandle m_OutputTexture;
systems::ResId<TextureView> m_SkyboxHandle;
systems::ResId<StorageImageView> m_OutputTexture;
u32 m_CubeSide;
f32 m_Roughness;
u32 m_EnvSide;
};
struct BrdfLutPushConstants
{
StorageTextureHandle m_OutputTexture;
systems::ResId<StorageImageView> m_OutputTexture;
};
#pragma region Pipeline Creation etc
vk::PushConstantRange pcr = {
.stageFlags = vk::ShaderStageFlagBits::eCompute,
.offset = 0,
.size = Cast<u32>(eastl::max(eastl::max(sizeof(SkyboxPushConstants), sizeof(BrdfLutPushConstants)),
eastl::max(sizeof(DiffuseIrradiancePushConstants), sizeof(PrefilterPushConstants)))),
};
// vk::PushConstantRange pcr = {
// .stageFlags = vk::ShaderStageFlagBits::eCompute,
// .offset = 0,
// .size = static_cast<u32>(
// eastl::max(eastl::max(sizeof(SkyboxPushConstants), sizeof(BrdfLutPushConstants)),
// eastl::max(sizeof(DiffuseIrradiancePushConstants), sizeof(PrefilterPushConstants)))),
// };
vk::PipelineLayout pipelineLayout;
const vk::PipelineLayoutCreateInfo layoutCreateInfo = {
.setLayoutCount = 1,
.pSetLayouts = &resMan->m_SetLayout,
.pushConstantRangeCount = 1,
.pPushConstantRanges = &pcr,
};
AbortIfFailed(pDevice->m_Device.createPipelineLayout(&layoutCreateInfo, nullptr, &pipelineLayout));
// vk::PipelineLayout pipelineLayout;
// const vk::PipelineLayoutCreateInfo layoutCreateInfo = {
// .setLayoutCount = 1,
// .pSetLayouts = &commitManager->GetDescriptorSetLayout(),
// .pushConstantRangeCount = 1,
// .pPushConstantRanges = &pcr,
// };
// AbortIfFailed(device.m_Device->createPipelineLayout(&layoutCreateInfo, nullptr, &pipelineLayout));
const auto eqRectToCubeShader = CreateShader(pDevice, EQUIRECT_TO_CUBE_SHADER_FILE);
const auto diffuseRadianceShader = CreateShader(pDevice, DIFFUSE_IRRADIANCE_SHADER_FILE);
const auto prefilterShader = CreateShader(pDevice, PREFILTER_SHADER_FILE);
const auto brdfLutShader = CreateShader(pDevice, BRDF_LUT_SHADER_FILE);
eastl::array computePipelineCreateInfo = {
vk::ComputePipelineCreateInfo{
.stage =
{
.stage = vk::ShaderStageFlagBits::eCompute,
.module = eqRectToCubeShader,
.pName = "main",
},
.layout = pipelineLayout,
},
vk::ComputePipelineCreateInfo{
.stage =
{
.stage = vk::ShaderStageFlagBits::eCompute,
.module = diffuseRadianceShader,
.pName = "main",
},
.layout = pipelineLayout,
},
vk::ComputePipelineCreateInfo{
.stage =
{
.stage = vk::ShaderStageFlagBits::eCompute,
.module = prefilterShader,
.pName = "main",
},
.layout = pipelineLayout,
},
vk::ComputePipelineCreateInfo{
.stage =
{
.stage = vk::ShaderStageFlagBits::eCompute,
.module = brdfLutShader,
.pName = "main",
},
.layout = pipelineLayout,
},
};
// const auto eqRectToCubeShader = CreateShader(pDevice, EQUIRECT_TO_CUBE_SHADER_FILE);
// const auto diffuseRadianceShader = CreateShader(pDevice, DIFFUSE_IRRADIANCE_SHADER_FILE);
// const auto prefilterShader = CreateShader(pDevice, PREFILTER_SHADER_FILE);
// const auto brdfLutShader = CreateShader(pDevice, BRDF_LUT_SHADER_FILE);
// eastl::array computePipelineCreateInfo = {
// vk::ComputePipelineCreateInfo{
// .stage =
// {
// .stage = vk::ShaderStageFlagBits::eCompute,
// .module = eqRectToCubeShader,
// .pName = "main",
// },
// .layout = pipelineLayout,
// },
// vk::ComputePipelineCreateInfo{
// .stage =
// {
// .stage = vk::ShaderStageFlagBits::eCompute,
// .module = diffuseRadianceShader,
// .pName = "main",
// },
// .layout = pipelineLayout,
// },
// vk::ComputePipelineCreateInfo{
// .stage =
// {
// .stage = vk::ShaderStageFlagBits::eCompute,
// .module = prefilterShader,
// .pName = "main",
// },
// .layout = pipelineLayout,
// },
// vk::ComputePipelineCreateInfo{
// .stage =
// {
// .stage = vk::ShaderStageFlagBits::eCompute,
// .module = brdfLutShader,
// .pName = "main",
// },
// .layout = pipelineLayout,
// },
// };
eastl::array<vk::Pipeline, computePipelineCreateInfo.size()> pipelines;
AbortIfFailed(pDevice->m_Device.createComputePipelines(pDevice->m_PipelineCache, Cast<u32>(computePipelineCreateInfo.size()),
computePipelineCreateInfo.data(), nullptr,
pipelines.data()));
// eastl::array<vk::Pipeline, computePipelineCreateInfo.size()> pipelines;
// AbortIfFailed(pDevice->m_Device.createComputePipelines(
// pDevice->m_PipelineCache, static_cast<u32>(computePipelineCreateInfo.size()),
// computePipelineCreateInfo.data(), nullptr, pipelines.data()));
vk::Pipeline eqRectToCubePipeline = pipelines[0];
vk::Pipeline diffuseIrradiancePipeline = pipelines[1];
vk::Pipeline prefilterPipeline = pipelines[2];
vk::Pipeline brdfLutPipeline = pipelines[3];
// vk::Pipeline eqRectToCubePipeline = pipelines[0];
// vk::Pipeline diffuseIrradiancePipeline = pipelines[1];
// vk::Pipeline prefilterPipeline = pipelines[2];
// vk::Pipeline brdfLutPipeline = pipelines[3];
for (auto &createInfos : computePipelineCreateInfo)
Pipeline eqRectToCubePipeline;
if (auto result =
device.CreateComputePipeline(eqRectToCubePipeline, {
.m_Shader =
{
.m_ShaderFile = EQUIRECT_TO_CUBE_SHADER_FILE,
.m_EntryPoints = {"main"},
},
.m_Name = "EqRect -> Cubemap",
}))
{
pDevice->m_Device.destroy(createInfos.stage.module, nullptr);
ERROR("EqRect -> Cubemap Pipeline Creation failed. Cause: {}", result.What()) THEN_ABORT(result.Value());
}
Pipeline diffuseIrradiancePipeline;
if (auto result = device.CreateComputePipeline(
diffuseIrradiancePipeline,
{{.m_ShaderFile = ENVIRONMENT_SHADER_FILE, .m_EntryPoints = {DIFFUSE_IRRADIANCE_ENTRY}},
"DiffuseIrradiance"}))
{
ERROR("Diffuse Irradiance compute pipeline creation failed. Cause: {}", result.What())
THEN_ABORT(result.Value());
}
Pipeline prefilterPipeline;
if (auto result = device.CreateComputePipeline(
prefilterPipeline,
{{.m_ShaderFile = ENVIRONMENT_SHADER_FILE, .m_EntryPoints = {PREFILTER_ENTRY}}, "Prefilter"}))
{
ERROR("Prefilter compute pipeline creation failed. Cause: {}", result.What())
THEN_ABORT(result.Value());
}
Pipeline brdfLutPipeline;
if (auto result = device.CreateComputePipeline(
brdfLutPipeline, {{.m_ShaderFile = ENVIRONMENT_SHADER_FILE, .m_EntryPoints = {BRDF_LUT_ENTRY}}, "BRDF"}))
{
ERROR("BRDF LUT compute pipeline creation failed. Cause: {}", result.What())
THEN_ABORT(result.Value());
}
#pragma endregion
@ -278,115 +328,68 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32
DiffuseIrradiancePushConstants diffuseIrradiancePushConstants = {
.m_SkyboxHandle = skyboxHandle,
.m_OutputTexture = diffuseIrradianceStorageHandle,
.m_CubeSide = diffuseIrradiance.m_Extent.width,
.m_CubeSide = diffuseIrradiance->m_Extent.width,
};
PrefilterPushConstants prefilterPushConstants = {
.m_SkyboxHandle = skyboxHandle,
.m_OutputTexture = systems::NullId{},
.m_EnvSide = cubeSide,
};
BrdfLutPushConstants brdfLutPushConstants = {
.m_OutputTexture = brdfLutStorageHandle,
};
resMan->Update();
commitManager->Update();
auto cmd = assetLoader->m_CommandBuffer;
constexpr vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
AbortIfFailed(cmd.begin(&beginInfo));
auto context = assetLoader.m_Device->CreateComputeContext();
#if !defined(ASTER_NDEBUG)
StackString<128> labelName = "Eqrect -> Cubemap: ";
labelName += name ? name : "<unknown env>";
vk::DebugUtilsLabelEXT label = {
.pLabelName = labelName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
cmd.beginDebugUtilsLabelEXT(&label);
#endif
context.Begin();
cmd.pipelineBarrier2(&readyToWriteDependency);
context.BeginDebugRegion("Eqrect -> Cubemap");
cmd.bindDescriptorSets(vk::PipelineBindPoint::eCompute, pipelineLayout, 0, 1, &resMan->m_DescriptorSet, 0, nullptr);
cmd.bindPipeline(vk::PipelineBindPoint::eCompute, eqRectToCubePipeline);
cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof skyboxPushConstant,
&skyboxPushConstant);
assert(skybox.m_Extent.width % 16 == 0 && skybox.m_Extent.height % 16 == 0);
cmd.dispatch(skybox.m_Extent.width / 16, skybox.m_Extent.height / 16, 6);
context.Dependency(readyToWriteDependency);
GenerateMipMaps(cmd, &skybox, vk::ImageLayout::eGeneral, vk::ImageLayout::eGeneral,
assert(skybox->m_Extent.width % 16 == 0 && skybox->m_Extent.height % 16 == 0);
context.Dispatch(eqRectToCubePipeline, skybox->m_Extent.width / 16, skybox->m_Extent.height / 16, 6,
skyboxPushConstant);
GenerateMipMaps(context, skybox, vk::ImageLayout::eGeneral, vk::ImageLayout::eGeneral,
vk::PipelineStageFlagBits2::eComputeShader, vk::PipelineStageFlagBits2::eComputeShader);
cmd.bindPipeline(vk::PipelineBindPoint::eCompute, diffuseIrradiancePipeline);
cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof skyboxPushConstant,
&diffuseIrradiancePushConstants);
assert(diffuseIrradiance.m_Extent.width % 16 == 0 && diffuseIrradiance.m_Extent.height % 16 == 0);
cmd.dispatch(diffuseIrradiance.m_Extent.width / 16, diffuseIrradiance.m_Extent.width / 16, 6);
assert(diffuseIrradiance->m_Extent.width % 16 == 0 && diffuseIrradiance->m_Extent.height % 16 == 0);
context.Dispatch(diffuseIrradiancePipeline, diffuseIrradiance->m_Extent.width / 16,
diffuseIrradiance->m_Extent.width / 16, 6, diffuseIrradiancePushConstants);
cmd.pipelineBarrier2(&diffIrrToSampleDependency);
context.Dependency(diffIrrToSampleDependency);
cmd.bindPipeline(vk::PipelineBindPoint::eCompute, prefilterPipeline);
u32 mipSize = prefilterCube.m_Extent.width;
u32 mipSize = prefilterCube->m_Extent.width;
assert(mipSize % 16 == 0);
for (u32 mipCount = 0; auto &tex : prefilterStorageHandles)
{
prefilterPushConstants.m_OutputTexture = tex;
prefilterPushConstants.m_CubeSide = mipSize;
prefilterPushConstants.m_Roughness = Cast<f32>(mipCount) / Cast<f32>(prefilterMipCountMax);
cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof prefilterPushConstants,
&prefilterPushConstants);
prefilterPushConstants.m_Roughness = static_cast<f32>(mipCount) / static_cast<f32>(prefilterMipCountMax - 1);
u32 groupCount = eastl::max(mipSize / 16u, 1u);
cmd.dispatch(groupCount, groupCount, 6);
context.Dispatch(prefilterPipeline, groupCount, groupCount, 6, prefilterPushConstants);
++mipCount;
mipSize = mipSize >> 1;
}
cmd.pipelineBarrier2(&skyboxToSampleDependency);
cmd.pipelineBarrier2(&prefilterToSampleDependency);
context.Dependency(skyboxToSampleDependency);
context.Dependency(prefilterToSampleDependency);
cmd.bindPipeline(vk::PipelineBindPoint::eCompute, brdfLutPipeline);
cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof brdfLutPushConstants,
&brdfLutPushConstants);
assert(brdfLut.m_Extent.width % 16 == 0 && brdfLut.m_Extent.height % 16 == 0);
cmd.dispatch(brdfLut.m_Extent.width / 16, brdfLut.m_Extent.height / 16, 1);
assert(brdfLut->m_Extent.width % 16 == 0 && brdfLut->m_Extent.height % 16 == 0);
context.Dispatch(brdfLutPipeline, brdfLut->m_Extent.width / 16, brdfLut->m_Extent.height / 16, 1,
brdfLutPushConstants);
#if !defined(ASTER_NDEBUG)
cmd.endDebugUtilsLabelEXT();
#endif
context.EndDebugRegion();
AbortIfFailed(cmd.end());
context.End();
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
};
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(computeQueue.submit(1, &submitInfo, fence));
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u32>));
pDevice->m_Device.destroy(fence, nullptr);
AbortIfFailed(pDevice->m_Device.resetCommandPool(assetLoader->m_CommandPool, {}));
skybox = {};
resMan->Release(skyboxStorageHandle);
resMan->Release(diffuseIrradianceStorageHandle);
resMan->Release(brdfLutStorageHandle);
for (auto &texHandles : prefilterStorageHandles)
{
StorageTextureCube st;
resMan->Release(&st, texHandles);
pDevice->m_Device.destroy(st.m_View, nullptr);
}
for (auto &pipeline : pipelines)
{
pDevice->m_Device.destroy(pipeline, nullptr);
}
pDevice->m_Device.destroy(pipelineLayout, nullptr);
auto receipt = device.Submit(context);
device.WaitOn(receipt);
return {
.m_Skybox = skyboxHandle,

@ -1,12 +1,14 @@
// =============================================
// Aster: ibl_helpers.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "gpu_resource_manager.h"
#include "aster/core/image.h"
#include "aster/core/image_view.h"
#include "aster/systems/resource.h"
struct Pipeline;
struct Texture;
@ -15,14 +17,10 @@ struct AssetLoader;
struct Environment
{
TextureHandle m_Skybox;
TextureHandle m_Diffuse;
TextureHandle m_Prefilter;
TextureHandle m_BrdfLut;
void Destroy(GpuResourceManager *resourceManager);
systems::ResId<TextureView> m_Skybox;
systems::ResId<TextureView> m_Diffuse;
systems::ResId<TextureView> m_Prefilter;
systems::ResId<TextureView> m_BrdfLut;
};
Environment
CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, u32 cubeSide, TextureHandle hdrEnv,
cstr name = nullptr);
Environment CreateCubeFromHdrEnv(AssetLoader &assetLoader, u32 cubeSide, systems::ResId<TextureView> hdrEnv);

@ -1,35 +1,16 @@
// =============================================
// Aster: light_manager.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "light_manager.h"
#include "aster/core/buffer.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
#include "aster/systems/resource.h"
#include "glm/ext/matrix_transform.hpp"
struct Light
{
union {
vec3 um_Position;
vec3 um_Direction;
};
f32 m_Range; // < 0.0 for invalid
u32 m_Color_; // LSB is used for flags. (R G B Flags)
f32 m_Intensity;
constexpr static u32 MAX_GEN = 0x40;
constexpr static u32 GEN_MASK = MAX_GEN - 1;
constexpr static u32 TYPE_MASK = 0xC0;
constexpr static u32 TYPE_INVALID = 0x0;
constexpr static u32 TYPE_DIRECTIONAL = 1 << 6;
constexpr static u32 TYPE_POINT = 2 << 6;
constexpr static u32 TYPE_SPOT = 3 << 6; // Currently Unused
constexpr static u32 COLOR_MASK = ~(GEN_MASK | TYPE_MASK);
};
// Static Checks
// Ensure layouts are exact.
@ -53,29 +34,29 @@ static_assert((Light::TYPE_MASK & Light::TYPE_SPOT) == Light::TYPE_SPOT);
static_assert(Light::COLOR_MASK == 0xFFFFFF00);
inline u32
ToColor32(const vec4 &col)
ToColor32(vec4 const &col)
{
const u32 r = Cast<u32>(eastl::min(col.r, 1.0f) * 255.99f);
const u32 g = Cast<u32>(eastl::min(col.g, 1.0f) * 255.99f);
const u32 b = Cast<u32>(eastl::min(col.b, 1.0f) * 255.99f);
const u32 a = Cast<u32>(eastl::min(col.a, 1.0f) * 255.99f);
u32 const r = static_cast<u32>(eastl::min(col.r, 1.0f) * 255.99f);
u32 const g = static_cast<u32>(eastl::min(col.g, 1.0f) * 255.99f);
u32 const b = static_cast<u32>(eastl::min(col.b, 1.0f) * 255.99f);
u32 const a = static_cast<u32>(eastl::min(col.a, 1.0f) * 255.99f);
return r << 24 | g << 16 | b << 8 | a;
}
inline u32
ToColor32(const vec3 &col)
ToColor32(vec3 const &col)
{
const u32 r = Cast<u32>(eastl::min(col.r, 1.0f) * 255.99f);
const u32 g = Cast<u32>(eastl::min(col.g, 1.0f) * 255.99f);
const u32 b = Cast<u32>(eastl::min(col.b, 1.0f) * 255.99f);
u32 const r = static_cast<u32>(eastl::min(col.r, 1.0f) * 255.99f);
u32 const g = static_cast<u32>(eastl::min(col.g, 1.0f) * 255.99f);
u32 const b = static_cast<u32>(eastl::min(col.b, 1.0f) * 255.99f);
constexpr u32 a = 255;
return r << 24 | g << 16 | b << 8 | a;
}
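// Example: ToColor32(vec3{1.0f, 0.5f, 0.0f}) yields 0xFF7F00FF
// (r = 255, g = 127, b = 0, alpha forced to 255).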
LightManager::LightManager(GpuResourceManager *resourceManager)
: m_ResourceManager{resourceManager}
LightManager::LightManager(systems::RenderingDevice &device)
: m_Device{&device}
, m_DirectionalLightCount{}
, m_PointLightCount{}
, m_MetaInfo{}
@ -83,41 +64,10 @@ LightManager::LightManager(GpuResourceManager *resourceManager)
{
}
LightManager::~LightManager()
{
m_ResourceManager->Release(m_MetaInfo.m_LightBuffer);
}
LightManager::LightManager(LightManager &&other) noexcept
: m_ResourceManager(other.m_ResourceManager)
, m_Lights(std::move(other.m_Lights))
, m_DirectionalLightCount(other.m_DirectionalLightCount)
, m_PointLightCount(other.m_PointLightCount)
, m_MetaInfo(other.m_MetaInfo)
, m_GpuBufferCapacity_(other.m_GpuBufferCapacity_)
{
other.m_MetaInfo.m_LightBuffer = {};
}
LightManager &
LightManager::operator=(LightManager &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = other.m_ResourceManager;
m_Lights = std::move(other.m_Lights);
m_DirectionalLightCount = other.m_DirectionalLightCount;
m_PointLightCount = other.m_PointLightCount;
m_MetaInfo = other.m_MetaInfo;
other.m_MetaInfo.m_LightBuffer = {};
m_GpuBufferCapacity_ = other.m_GpuBufferCapacity_;
return *this;
}
LightHandle
LightManager::AddDirectional(const vec3 &direction, const vec3 &color, f32 intensity)
LightManager::AddDirectional(vec3 const &direction, vec3 const &color, f32 intensity)
{
const vec3 normDirection = normalize(direction);
vec3 const normDirection = normalize(direction);
if (m_DirectionalLightCount < m_MetaInfo.m_DirectionalLightMaxCount)
{
u16 index = 0;
@ -125,7 +75,7 @@ LightManager::AddDirectional(const vec3 &direction, const vec3 &color, f32 inten
{
if (light.m_Range < 0)
{
const u8 gen = light.m_Color_ & Light::GEN_MASK;
u8 const gen = light.m_Color_ & Light::GEN_MASK;
light.m_Color_ = (ToColor32(color) & Light::COLOR_MASK) | Light::TYPE_DIRECTIONAL | gen;
light.m_Range = 1.0f;
@ -145,8 +95,8 @@ LightManager::AddDirectional(const vec3 &direction, const vec3 &color, f32 inten
if (m_DirectionalLightCount == m_MetaInfo.m_DirectionalLightMaxCount &&
m_MetaInfo.m_DirectionalLightMaxCount == m_MetaInfo.m_PointLightOffset)
{
const u16 oldPointLightOffset = m_MetaInfo.m_PointLightOffset;
const u32 pointLightMaxCount = m_MetaInfo.m_PointLightMaxCount;
u16 const oldPointLightOffset = m_MetaInfo.m_PointLightOffset;
u32 const pointLightMaxCount = m_MetaInfo.m_PointLightMaxCount;
// Might cause a capacity increase, but I want to use that for my gpu buffer resize.
m_Lights.push_back();
m_Lights.push_back();
@ -172,7 +122,7 @@ LightManager::AddDirectional(const vec3 &direction, const vec3 &color, f32 inten
m_Lights[m_DirectionalLightCount].m_Range = 1.0f;
m_Lights[m_DirectionalLightCount].um_Direction = normDirection;
m_Lights[m_DirectionalLightCount].m_Intensity = intensity;
const u16 index = m_DirectionalLightCount;
u16 const index = m_DirectionalLightCount;
++m_DirectionalLightCount;
++m_MetaInfo.m_DirectionalLightMaxCount;
@ -181,7 +131,7 @@ LightManager::AddDirectional(const vec3 &direction, const vec3 &color, f32 inten
}
LightHandle
LightManager::AddPoint(const vec3 &position, const vec3 &color, const f32 radius, f32 intensity)
LightManager::AddPoint(vec3 const &position, vec3 const &color, f32 const radius, f32 intensity)
{
assert(m_PointLightCount <= m_MetaInfo.m_PointLightMaxCount);
assert(radius >= 0.0f);
@ -192,7 +142,7 @@ LightManager::AddPoint(const vec3 &position, const vec3 &color, const f32 radius
{
if (light->m_Range < 0)
{
const u8 gen = light->m_Color_ & Light::GEN_MASK;
u8 const gen = light->m_Color_ & Light::GEN_MASK;
light->m_Color_ = (ToColor32(color) & Light::COLOR_MASK) | Light::TYPE_POINT | gen;
light->m_Range = radius;
@ -201,7 +151,7 @@ LightManager::AddPoint(const vec3 &position, const vec3 &color, const f32 radius
m_GpuBufferCapacity_ |= UPDATE_REQUIRED_BIT;
return {Light::TYPE_POINT, gen, Cast<u16>(index)};
return {Light::TYPE_POINT, gen, static_cast<u16>(index)};
}
++light;
}
@ -210,7 +160,7 @@ LightManager::AddPoint(const vec3 &position, const vec3 &color, const f32 radius
}
m_Lights.push_back();
const u16 index = m_PointLightCount;
u16 const index = m_PointLightCount;
Light *light = &m_Lights[index + m_MetaInfo.m_PointLightOffset];
constexpr u8 gen = 0; // New light
@ -231,31 +181,29 @@ LightManager::AddPoint(const vec3 &position, const vec3 &color, const f32 radius
void
LightManager::Update()
{
const u16 requiredBufferCapacity = eastl::min(Cast<u16>(m_Lights.capacity()), MAX_LIGHTS);
u16 const requiredBufferCapacity = eastl::min(static_cast<u16>(m_Lights.capacity()), MAX_LIGHTS);
if ((m_GpuBufferCapacity_ & CAPACITY_MASK) < requiredBufferCapacity)
{
StorageBuffer newBuffer;
newBuffer.Init(m_ResourceManager->m_Device, requiredBufferCapacity * sizeof m_Lights[0], true, "Light Buffer");
m_LightBuffer = m_Device->CreateStorageBuffer(requiredBufferCapacity * sizeof m_Lights[0], "Light Buffer");
m_GpuBufferCapacity_ = requiredBufferCapacity | UPDATE_REQUIRED_BIT;
m_ResourceManager->Release(m_MetaInfo.m_LightBuffer);
m_MetaInfo.m_LightBuffer = m_ResourceManager->Commit(&newBuffer);
m_MetaInfo.m_LightBuffer = m_LightBuffer->GetDeviceAddress();
}
if (m_GpuBufferCapacity_ & UPDATE_REQUIRED_BIT)
{
m_ResourceManager->Write(m_MetaInfo.m_LightBuffer, 0, m_Lights.size() * sizeof m_Lights[0], m_Lights.data());
m_LightBuffer->Write(0, m_Lights.size() * sizeof m_Lights[0], m_Lights.data());
}
}
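// Dirty-flag packing sketch: the low bit of m_GpuBufferCapacity_ is
// UPDATE_REQUIRED_BIT, so the stored capacity is always even:
//   m_GpuBufferCapacity_ = requiredBufferCapacity | UPDATE_REQUIRED_BIT; // mark dirty
//   u16 capacity = m_GpuBufferCapacity_ & CAPACITY_MASK;                 // read capacity back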
void
LightManager::RemoveLight(const LightHandle handle)
LightManager::RemoveLight(LightHandle const handle)
{
const u8 handleGen = handle.m_Generation;
u8 const handleGen = handle.m_Generation;
if (handle.m_Type == Light::TYPE_DIRECTIONAL)
{
Light *lightSlot = &m_Lights[handle.m_Index];
const u8 slotGen = lightSlot->m_Color_ & Light::GEN_MASK;
u8 const slotGen = lightSlot->m_Color_ & Light::GEN_MASK;
if (slotGen > handleGen)
{
WARN("Invalid handle gen: {} being freed. (slot gen: {})", handleGen, slotGen);
@ -270,7 +218,7 @@ LightManager::RemoveLight(const LightHandle handle)
if (handle.m_Type == Light::TYPE_POINT)
{
Light *lightSlot = &m_Lights[handle.m_Index + m_MetaInfo.m_PointLightOffset];
const u8 slotGen = lightSlot->m_Color_ & Light::GEN_MASK;
u8 const slotGen = lightSlot->m_Color_ & Light::GEN_MASK;
if (slotGen > handleGen)
{
WARN("Invalid handle gen: {} being freed. (slot gen: {})", handleGen, slotGen);

@ -1,6 +1,6 @@
// =============================================
// Aster: light_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
@ -8,7 +8,21 @@
#include "aster/aster.h"
// TODO: Separate files so you only import handles.
#include "gpu_resource_manager.h"
#include "aster/core/buffer.h"
#include "aster/systems/resource.h"
#include <EASTL/vector.h>
namespace systems
{
class RenderingDevice;
}
namespace systems
{
class ResourceManager;
class CommitManager;
} // namespace systems
struct DirectionalLight
{
@ -33,26 +47,51 @@ struct LightHandle
u16 m_Index;
};
struct Light;
struct Light
{
union {
vec3 um_Position;
vec3 um_Direction;
};
f32 m_Range; // < 0.0 for invalid
u32 m_Color_; // Low byte is used for flags. (R G B Flags)
f32 m_Intensity;
u32 m_Pad0;
u32 m_Pad1;
constexpr static u32 MAX_GEN = 0x40;
constexpr static u32 GEN_MASK = MAX_GEN - 1;
constexpr static u32 TYPE_MASK = 0xC0;
constexpr static u32 TYPE_INVALID = 0x0;
constexpr static u32 TYPE_DIRECTIONAL = 1 << 6;
constexpr static u32 TYPE_POINT = 2 << 6;
constexpr static u32 TYPE_SPOT = 3 << 6; // Currently Unused
constexpr static u32 COLOR_MASK = ~(GEN_MASK | TYPE_MASK);
};
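// Unpacking m_Color_ (sketch):
//   u8 gen = c & GEN_MASK;    // 6-bit slot generation
//   u32 type = c & TYPE_MASK; // directional / point / spot
//   u32 rgb = c & COLOR_MASK; // packed R G B in the top 24 bits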
struct LightManager
{
constexpr static u16 MAX_LIGHTS = MaxValue<u16>;
struct LightMetaInfo
{
// The number of directional lights is relatively low (1 - 2) and will almost never change in a scene.
// We can use that with Offset = 0, and point light at further offsets.
// This way we don't need to move point lights often.
BufferHandle m_LightBuffer; // 04 04
u16 m_PointLightMaxCount; // 02 06
u16 m_PointLightOffset; // 02 08
u16 m_DirectionalLightMaxCount; // 02 10
u16 m_UnusedPadding0 = 0; // 02 12
uptr m_LightBuffer; // 08 08
u16 m_PointLightMaxCount; // 02 10
u16 m_PointLightOffset; // 02 12
u16 m_DirectionalLightMaxCount; // 02 14
u16 m_UnusedPadding0 = 0; // 02 16
};
GpuResourceManager *m_ResourceManager;
systems::RenderingDevice *m_Device;
eastl::vector<Light> m_Lights;
Ref<Buffer> m_LightBuffer;
// We don't need a Directional Light free list. We will just brute force iterate.
u16 m_DirectionalLightCount;
@ -66,18 +105,18 @@ struct LightManager
// Using lower bit. Capacity can be directly a multiple of 2
// Thus, range is up to MaxValue<u16>
constexpr static u16 UPDATE_REQUIRED_BIT = 1;
constexpr static u16 CAPACITY_MASK = Cast<u16>(~UPDATE_REQUIRED_BIT);
constexpr static u16 CAPACITY_MASK = static_cast<u16>(~UPDATE_REQUIRED_BIT);
LightHandle AddDirectional(const vec3 &direction, const vec3 &color, f32 intensity);
LightHandle AddPoint(const vec3 &position, const vec3 &color, f32 radius, f32 intensity);
void Update();
void RemoveLight(LightHandle handle);
explicit LightManager(GpuResourceManager *resourceManager);
~LightManager();
~LightManager() = default;
LightManager(LightManager &&other) noexcept;
LightManager &operator=(LightManager &&other) noexcept;
explicit LightManager(systems::RenderingDevice &device);
LightManager(LightManager &&other) noexcept = default;
LightManager &operator=(LightManager &&other) noexcept = default;
DISALLOW_COPY_AND_ASSIGN(LightManager);
};
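// Usage sketch (assumes a systems::RenderingDevice device):
//   LightManager lights{device};
//   LightHandle sun = lights.AddDirectional({0.f, -1.f, 0.f}, {1.f, 1.f, 1.f}, 5.f);
//   lights.Update(); // (re)allocates and uploads the GPU light buffer
//   lights.RemoveLight(sun);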
