Compare commits

..

No commits in common. "new-arch" and "canon" have entirely different histories.

204 changed files with 6784 additions and 17127 deletions

1
.gitignore vendored
View File

@ -5,4 +5,3 @@ build/
.direnv/
.ccls-cache/
*.user
/vcpkg_installed

View File

@ -4,12 +4,12 @@ cmake_minimum_required(VERSION 3.13)
project(Aster VERSION 0.1.0)
set(CMAKE_CXX_STANDARD 23)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
if (MSVC)
set(CMAKE_CXX_FLAGS "/W4 /GR- ${MSVC_FLAGS} /utf-8")
set(CMAKE_CXX_FLAGS "/W4 /GR- ${MSVC_FLAGS}")
set(CMAKE_CXX_FLAGS_RELEASE "/O3")
add_compile_definitions(_HAS_EXCEPTIONS=0)
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)

View File

@ -1,27 +1,30 @@
function(add_shader TARGET SHADER)
find_package(Vulkan REQUIRED COMPONENTS dxc)
get_filename_component(vulkan-bin-dir ${Vulkan_GLSLC_EXECUTABLE} DIRECTORY)
find_program(slangc_exe NAMES "slangc")
if (NOT slangc_exe STREQUAL "slangc_exe-NOTFOUND")
set(slangc_exe_FOUND true)
endif()
get_filename_component(shader-ext ${SHADER} LAST_EXT)
get_filename_component(shader-inner ${SHADER} NAME_WLE)
get_filename_component(shader-type ${shader-inner} LAST_EXT)
string(REPLACE "." "" shader-type ${shader-type})
set(current-shader-path ${CMAKE_CURRENT_SOURCE_DIR}/${SHADER})
set(current-output-path ${CMAKE_CURRENT_BINARY_DIR}/${SHADER}.slang-module)
set(current-copy-path ${CMAKE_CURRENT_BINARY_DIR}/${SHADER})
set(current-output-path ${CMAKE_CURRENT_BINARY_DIR}/${SHADER}.spv)
get_filename_component(current-output-dir ${current-output-path} DIRECTORY)
file(MAKE_DIRECTORY ${current-output-dir})
if (${shader-ext} STREQUAL ".slang")
if (Vulkan_dxc_exe_FOUND AND ${shader-ext} STREQUAL ".hlsl")
message("Marked as hlsl file. ${current-output-path}")
add_custom_command(
OUTPUT ${current-output-path} ${current-copy-path}
COMMAND ${slangc_exe} ${current-shader-path} -o ${current-output-path}
COMMAND ${CMAKE_COMMAND} -E copy ${current-shader-path} ${current-copy-path}
OUTPUT ${current-output-path}
COMMAND Vulkan::dxc_exe ${DXC_SHADER_FLAGS} -spirv -T "${shader-type}_6_0" -E main ${current-shader-path} -Fo ${current-output-path}
DEPENDS ${current-shader-path}
IMPLICIT_DEPENDS CXX ${current-shader-path}
VERBATIM)
elseif (Vulkan_glslc_FOUND AND ${shader-ext} STREQUAL ".glsl")
message("Marked as glsl file. ${current-output-path}")
add_custom_command(
OUTPUT ${current-output-path}
COMMAND Vulkan::glslc ${GLSLC_SHADER_FLAGS} -o ${current-output-path} ${current-shader-path}
DEPENDS ${current-shader-path}
IMPLICIT_DEPENDS CXX ${current-shader-path}
VERBATIM)
@ -31,3 +34,9 @@ function(add_shader TARGET SHADER)
set_source_files_properties(${current-output-path} PROPERTIES GENERATED TRUE)
target_sources(${TARGET} PRIVATE ${current-output-path})
endfunction(add_shader)
function(add_shaders TARGET SHADERS)
foreach(shader IN ${SHADERS})
add_shader(TARGET ${shader})
endforeach()
endfunction(add_shaders)

View File

@ -9,15 +9,13 @@ find_package(Vulkan REQUIRED)
find_package(fmt CONFIG REQUIRED)
find_package(VulkanMemoryAllocator CONFIG REQUIRED)
find_package(EASTL CONFIG REQUIRED)
find_library(slang NAMES "slang" CONFIG REQUIRED)
find_package(foonathan_memory CONFIG REQUIRED)
add_library(aster_core STATIC)
add_subdirectory("include")
add_subdirectory("src")
target_compile_features(aster_core PUBLIC cxx_std_23)
set_property(TARGET aster_core PROPERTY CXX_STANDARD 20)
target_include_directories(aster_core PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include/aster")
target_include_directories(aster_core PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
@ -28,6 +26,4 @@ target_include_directories(aster_core PRIVATE ${SCOTTT_DEBUGBREAK_INCLUDE_DIRS})
target_link_libraries(aster_core PRIVATE fmt::fmt)
target_link_libraries(aster_core PRIVATE EASTL)
target_link_libraries(aster_core PUBLIC Vulkan::Headers GPUOpen::VulkanMemoryAllocator)
target_link_libraries(aster_core PUBLIC ${slang})
target_link_libraries(aster_core PRIVATE foonathan_memory)

View File

@ -7,7 +7,7 @@ INTERFACE
"global.h"
"constants.h"
"config.h"
"instance.h"
"context.h"
"physical_device.h"
"device.h"
"swapchain.h"
@ -15,9 +15,7 @@ INTERFACE
"queue_allocation.h"
"buffer.h"
"image.h"
"image_view.h"
"surface.h"
"size.h"
"type_traits.h"
"window.h"
"sampler.h")
"window.h")

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: buffer.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -9,110 +9,125 @@
struct Device;
/// A Vulkan buffer wrapper.
// TODO Refactor the Buffer Hierarchy
struct Buffer
{
enum class FlagBits : u8
{
eNone = 0x0,
eStaging = 0x1,
eUniform = 0x2,
eStorage = 0x4,
eIndex = 0x8,
eVertex = 0x10,
eIndirect = 0x20,
};
using Flags = vk::Flags<FlagBits>;
constexpr static Flags FLAGS = {};
Device const *m_Device = nullptr;
vk::Buffer m_Buffer = nullptr;
VmaAllocation m_Allocation = nullptr;
u8 *m_Mapped = nullptr; ///< If the buffer is host visible, it should be (and stay) mapped.
uptr m_DeviceAddr = 0;
usize m_Size = 0;
Flags m_Flags = {};
/// @returns True if it is a valid vulkan buffer.
[[nodiscard]] bool
IsValid() const
{
return m_Buffer;
}
// If the buffer is host visible, it should be (and stay) mapped.
u8 *m_Mapped = nullptr;
/// If the buffer is host visible, it should be (and stay) mapped.
/// @returns True if the buffer is host-visible and mapped.
[[nodiscard]] bool
IsMapped() const
{
return m_Mapped;
}
[[nodiscard]] usize GetSize() const;
[[nodiscard]] bool IsHostVisible() const;
[[nodiscard]] bool IsValid() const;
[[nodiscard]] bool IsMapped() const;
[[nodiscard]] bool IsOwned() const;
[[nodiscard]] bool IsCommitted() const;
void SetCommitted(bool committed);
/// Writes the data to the buffer.
/// @note The buffer must be mapped.
void Write(usize offset, usize size, void const *data) const;
void Destroy(const Device *device);
void Write(const Device *device, usize offset, usize size, const void *data);
/// If Buffer Device Address is enabled,
/// Get a pointer.
[[nodiscard]] uptr GetDeviceAddress() const;
void Allocate(const Device *device, usize size, vk::BufferUsageFlags bufferUsage,
VmaAllocationCreateFlags allocationFlags, VmaMemoryUsage memoryUsage, cstr name);
// Constructors
uptr
GetDeviceAddress(const Device *device);
Buffer(Device const *device, usize size, vk::BufferUsageFlags bufferUsage, VmaAllocationCreateFlags allocationFlags,
VmaMemoryUsage memoryUsage, cstr name);
// Buffer.size is used for bookkeeping
// If the buffer is Invalid, the remaining data in Buffer is used intrusively by `GpuResourceManager`.
usize m_Size_ = 0;
Buffer(Buffer &&other) noexcept;
Buffer &operator=(Buffer &&other) noexcept;
~Buffer();
DISALLOW_COPY_AND_ASSIGN(Buffer);
constexpr static usize VALID_BUFFER_BIT = Cast<usize>(1llu << 63);
constexpr static usize OWNED_BIT = 1llu << 62;
constexpr static usize COMMITTED_BIT = 1llu << 61;
constexpr static usize SIZE_MASK = ~(VALID_BUFFER_BIT | OWNED_BIT | COMMITTED_BIT);
};
template <>
constexpr bool concepts::GpuResource<Buffer> = true;
// Ensure that m_Size doesn't get used intrusively since it manages the state.
static_assert(offsetof(Buffer, m_Size_) > sizeof(usize));
struct UniformBuffer : Buffer
{
constexpr static Flags FLAGS = FlagBits::eUniform;
void Init(const Device *device, usize size, cstr name = nullptr);
};
struct StorageBuffer : Buffer
{
constexpr static Flags FLAGS = FlagBits::eStorage;
void Init(const Device *device, usize size, bool hostVisible, cstr name = nullptr);
void Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name = nullptr);
};
struct IndirectBuffer : Buffer
{
constexpr static Flags FLAGS = FlagBits::eIndirect;
void Init(const Device *device, usize size, bool hostVisible, cstr name = nullptr);
};
struct StorageIndexBuffer : StorageBuffer
{
void Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name = nullptr);
};
struct VertexBuffer : Buffer
{
constexpr static Flags FLAGS = FlagBits::eVertex;
void Init(const Device *device, usize size, cstr name = nullptr);
void Write(const Device *device, void *data, usize size, usize offset) const = delete;
};
struct IndexBuffer : Buffer
{
constexpr static Flags FLAGS = FlagBits::eIndex;
void Init(const Device *device, usize size, cstr name = nullptr);
void Write(const Device *device, void *data, usize size, usize offset) const = delete;
};
struct StagingBuffer : Buffer
{
constexpr static Flags FLAGS = FlagBits::eStaging;
void Init(const Device *device, usize size, cstr name = nullptr);
};
namespace concepts
inline usize
Buffer::GetSize() const
{
template <typename T>
concept AnyBuffer = std::derived_from<T, Buffer>;
return m_Size_ & SIZE_MASK;
}
template <typename T, typename TInto>
concept BufferInto = std::derived_from<T, Buffer> and std::derived_from<TInto, Buffer> and
(static_cast<bool>(T::FLAGS & TInto::FLAGS) or std::same_as<Buffer, TInto>);
inline bool
Buffer::IsHostVisible() const
{
return IsMapped();
}
template <typename T>
concept AnyBufferRef = Deref<T> and AnyBuffer<DerefType<T>>;
inline bool
Buffer::IsValid() const
{
return m_Size_ & VALID_BUFFER_BIT;
}
template <typename T, typename TTo>
concept BufferRefTo = Deref<T> and BufferInto<DerefType<T>, TTo>;
inline bool
Buffer::IsMapped() const
{
return m_Mapped;
}
} // namespace concepts
inline bool
Buffer::IsOwned() const
{
return m_Size_ & OWNED_BIT;
}
inline bool
Buffer::IsCommitted() const
{
return m_Size_ & COMMITTED_BIT;
}
inline void
Buffer::SetCommitted(const bool committed)
{
m_Size_ = committed ? (m_Size_ | COMMITTED_BIT) : (m_Size_ & ~COMMITTED_BIT);
}

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: config.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -15,8 +15,6 @@
#define VULKAN_HPP_NO_STRUCT_CONSTRUCTORS
#define VULKAN_HPP_DISABLE_ENHANCED_MODE 1
#define VULKAN_HPP_NO_EXCEPTIONS 1
#define VULKAN_HPP_NO_SMART_HANDLE 1
#define VULKAN_HPP_NO_STRUCT_SETTERS 1
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: constants.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -13,8 +13,6 @@
#include <glm/glm.hpp>
#include <glm/gtx/quaternion.hpp>
#include <atomic>
using c8 = char;
using u8 = uint8_t;
using u16 = uint16_t;
@ -30,9 +28,8 @@ using f128 = long double;
using b8 = bool;
using b32 = u32;
using usize = size_t;
using isize = intptr_t;
using uptr = uintptr_t;
using cstr = char const *;
using cstr = const char *;
namespace ansi_color
{
@ -47,28 +44,42 @@ constexpr auto White = "\u001b[37m";
constexpr auto Reset = "\u001b[0m";
} // namespace ansi_color
template <typename TType, typename TFrom>
constexpr auto
Cast(TFrom &&in)
{
return static_cast<TType>(std::forward<TFrom>(in));
}
template <typename TType, typename TFrom>
constexpr auto
Recast(TFrom &&in)
{
return reinterpret_cast<TType>(std::forward<TFrom>(in));
}
constexpr f32
operator""_deg(long double degrees)
{
return glm::radians<f32>(static_cast<f32>(degrees));
return glm::radians<f32>(Cast<f32>(degrees));
}
constexpr f32
operator""_deg(unsigned long long int degrees)
{
return glm::radians<f32>(static_cast<f32>(degrees));
return glm::radians<f32>(Cast<f32>(degrees));
}
constexpr f32
operator""_rad(long double rads)
{
return static_cast<f32>(rads);
return Cast<f32>(rads);
}
constexpr f32
operator""_rad(unsigned long long int rads)
{
return static_cast<f32>(rads);
return Cast<f32>(rads);
}
using glm::ivec2;
@ -105,31 +116,31 @@ constexpr Version VERSION = {
};
constexpr u32
Kilobyte(u32 const in)
Kilobyte(const u32 in)
{
return in * 1024;
}
constexpr usize
Kilobyte(usize const in)
Kilobyte(const usize in)
{
return in * 1024;
}
constexpr u32
Megabyte(u32 const in)
Megabyte(const u32 in)
{
return in * 1024 * 1024;
}
constexpr usize
Megabyte(usize const in)
Megabyte(const usize in)
{
return in * 1024 * 1024;
}
constexpr usize
Gigabyte(usize const in)
Gigabyte(const usize in)
{
return in * 1024 * 1024 * 1024;
}

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: context.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -8,26 +8,25 @@
#include "global.h"
/**
* @class Instance
* @class Context
*
* @brief Vulkan context to handle device initialization logic.
*
* Handles the required hardware interactions.
*/
struct Instance final
struct Context final
{
// Members
vk::Instance m_Instance = nullptr;
vk::DebugUtilsMessengerEXT m_DebugMessenger = nullptr;
// Ctor/Dtor
Instance() = default;
Instance(cstr appName, Version version, bool enableValidation = ENABLE_LAYER_MESSAGES_DEFAULT_VALUE);
~Instance();
Context(cstr appName, Version version, bool enableValidation = ENABLE_LAYER_MESSAGES_DEFAULT_VALUE);
~Context();
// Move
Instance(Instance &&other) noexcept;
Instance &operator=(Instance &&other) noexcept;
Context(Context &&other) noexcept;
Context &operator=(Context &&other) noexcept;
#if !defined(ASTER_NDEBUG)
constexpr static bool ENABLE_LAYER_MESSAGES_DEFAULT_VALUE = true;
@ -35,5 +34,5 @@ struct Instance final
constexpr static bool ENABLE_LAYER_MESSAGES_DEFAULT_VALUE = false;
#endif
DISALLOW_COPY_AND_ASSIGN(Instance);
DISALLOW_COPY_AND_ASSIGN(Context);
};

View File

@ -1,17 +1,17 @@
// =============================================
// Aster: device.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
#include <EASTL/span.h>
#include <EASTL/vector.h>
#include <EASTL/span.h>
struct QueueAllocation;
struct Instance;
struct Context;
struct PhysicalDevice;
struct Features
@ -31,31 +31,19 @@ struct Device final
vk::PipelineCache m_PipelineCache = nullptr;
bool m_ValidationEnabled = true;
template <concepts::VkHandle T>
void SetName(T const &object, cstr name) const;
template <typename T>
requires vk::isVulkanHandleType<T>::value void SetName(const T &object, cstr name) const;
[[nodiscard]] vk::Queue GetQueue(u32 familyIndex, u32 queueIndex) const;
[[nodiscard]] eastl::vector<u8> DumpPipelineCache() const;
void WaitIdle() const;
vk::Device *
operator->()
{
return &m_Device;
}
vk::Device const *
operator->() const
{
return &m_Device;
}
// Ctor/Dtor
Device() = default;
Device(Instance const &context, PhysicalDevice &physicalDevice, Features &enabledFeatures,
eastl::span<QueueAllocation> const &queueAllocations, eastl::span<u8> const &pipelineCacheData,
NameString &&name);
Device(const Context *context, PhysicalDevice *physicalDevice, Features *enabledFeatures,
const eastl::vector<QueueAllocation> &queueAllocations, NameString &&name);
Device(const Context *context, PhysicalDevice *physicalDevice, Features *enabledFeatures,
const eastl::vector<QueueAllocation> &queueAllocations, eastl::span<u8> &&pipelineCacheData, NameString &&name);
~Device();
// Move
@ -65,15 +53,15 @@ struct Device final
DISALLOW_COPY_AND_ASSIGN(Device);
};
template <concepts::VkHandle T>
void
Device::SetName(T const &object, cstr name) const
template <typename T>
requires vk::isVulkanHandleType<T>::value void
Device::SetName(const T &object, cstr name) const
{
if (!m_ValidationEnabled || !name || !object)
return;
auto handle = reinterpret_cast<u64>(static_cast<typename T::NativeType>(object));
vk::DebugUtilsObjectNameInfoEXT const objectNameInfo = {
auto handle = Recast<u64>(Cast<typename T::NativeType>(object));
const vk::DebugUtilsObjectNameInfoEXT objectNameInfo = {
.objectType = object.objectType,
.objectHandle = handle,
.pObjectName = name,

View File

@ -1,20 +1,17 @@
// =============================================
// Aster: global.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "config.h"
#include "constants.h"
#include "aster/util/logger.h"
#include "util/logger.h"
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <EASTL/shared_ptr.h>
#include <fmt/format.h>
// Macros that can collide with functions.
@ -29,8 +26,6 @@
#if !defined(NDEBUG)
#define VULKAN_HPP_ASSERT(expr) DEBUG_IF(!(expr), "Vulkan assert failed")
#endif
#include "EASTL/intrusive_ptr.h"
#include "type_traits.h"
#include <EASTL/fixed_string.h>
@ -55,70 +50,38 @@ constexpr u32 ASTER_API_VERSION = VK_API_VERSION_1_3;
#define Take(ELEMENT) eastl::exchange(ELEMENT, {})
#define TODO(...) assert(!("Unimplemented: " __VA_ARGS__))
#define FIX(...) static_assert(!("Unimplemented: " __VA_ARGS__))
#define UNREACHABLE(...) assert(!("Unreachable: " __VA_ARGS__))
#define AbortIfFailed(RESULT) \
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = static_cast<vk::Result>(RESULT)), "Cause: {}", _checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
#define AbortIfFailedMV(RESULT, MSG, EXTRA) \
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = static_cast<vk::Result>(RESULT)), MSG " Cause: {}", EXTRA, \
_checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
#define AbortIfFailedM(RESULT, MSG) \
do \
{ \
auto _checkResultValue_ = static_cast<vk::Result>(RESULT); \
ERROR_IF(Failed(_checkResultValue_), MSG " Cause: {}", _checkResultValue_) THEN_ABORT(_checkResultValue_); \
} while (false)
#define TODO(MSG) assert(false && ("Unimplemented: " MSG))
[[nodiscard]] inline bool
Failed(vk::Result const result)
Failed(const vk::Result result)
{
return result != vk::Result::eSuccess;
}
namespace concepts
{
template <typename T>
concept VkHandle = vk::isVulkanHandleType<T>::value;
}
using NameString = eastl::fixed_string<char, 32, false>;
template <typename TFlagBits>
struct eastl::hash<vk::Flags<TFlagBits>> // NOLINT(*-dcl58-cpp)
{
[[nodiscard]] usize
operator()(vk::Flags<TFlagBits> const &val)
operator()(const vk::Flags<TFlagBits> &val)
{
return std::hash<u32>()(static_cast<u32>(val));
return std::hash<u32>()(Cast<u32>(val));
}
};
template <typename T>
[[nodiscard]] usize
HashAny(T const &val)
HashAny(const T &val)
{
return eastl::hash<std::remove_cvref_t<T>>()(val);
}
[[nodiscard]] inline usize
HashCombine(usize const hash0, usize const hash1)
HashCombine(const usize hash0, const usize hash1)
{
constexpr usize saltValue = 0x9e3779b9;
usize const tempVar = hash1 + saltValue + (hash0 << 6) + (hash0 >> 2);
const usize tempVar = hash1 + saltValue + (hash0 << 6) + (hash0 >> 2);
return hash0 ^ tempVar;
}
@ -141,32 +104,32 @@ struct Time
Update()
{
ERROR_IF(std::isnan(m_Elapsed), "Time not init.");
auto const newElapsed = glfwGetTime();
const auto newElapsed = glfwGetTime();
m_Delta = std::clamp(newElapsed - m_Elapsed, 0.0, MAX_DELTA);
m_Elapsed = newElapsed;
}
};
[[nodiscard]] constexpr usize
ClosestMultiple(usize const val, usize const of)
ClosestMultiple(const usize val, const usize of)
{
return of * ((val + of - 1) / of);
}
[[nodiscard]] constexpr u32
ClosestMultiple(u32 const val, u32 const of)
ClosestMultiple(const u32 val, const u32 of)
{
return of * ((val + of - 1) / of);
}
[[nodiscard]] constexpr bool
IsPowerOfTwo(usize const val)
IsPowerOfTwo(const usize val)
{
return val && !(val & (val - 1));
}
[[nodiscard]] constexpr bool
IsPowerOfTwo(u32 const val)
IsPowerOfTwo(const u32 val)
{
return val && !(val & (val - 1));
}
@ -186,10 +149,10 @@ ClosestLargerPowerOfTwo(usize val)
}
[[nodiscard]] constexpr usize
ClosestPowerOfTwo(usize const val)
ClosestPowerOfTwo(const usize val)
{
usize const largerPo2 = ClosestLargerPowerOfTwo(val);
usize const smallerPo2 = largerPo2 >> 1;
const usize largerPo2 = ClosestLargerPowerOfTwo(val);
const usize smallerPo2 = largerPo2 >> 1;
return (smallerPo2 + largerPo2 <= (val << 1)) ? largerPo2 : smallerPo2;
}
@ -207,10 +170,10 @@ ClosestLargerPowerOfTwo(u32 val)
}
[[nodiscard]] constexpr u32
ClosestPowerOfTwo(u32 const val)
ClosestPowerOfTwo(const u32 val)
{
u32 const largerPo2 = ClosestLargerPowerOfTwo(val);
u32 const smallerPo2 = largerPo2 >> 1;
const u32 largerPo2 = ClosestLargerPowerOfTwo(val);
const u32 smallerPo2 = largerPo2 >> 1;
return (smallerPo2 + largerPo2 <= (val << 1)) ? largerPo2 : smallerPo2;
}
@ -226,20 +189,15 @@ GetMaskOffset(u32 val)
return count;
}
template <typename T>
concept VkToString = requires(T a) {
{ vk::to_string(a) } -> std::convertible_to<std::string>;
};
template <VkToString T>
struct fmt::formatter<T> : nested_formatter<std::string>
template <>
struct fmt::formatter<vk::Result> : nested_formatter<std::string>
{
auto
// ReSharper disable once CppInconsistentNaming
format(T result, format_context &ctx) const
format(vk::Result result, format_context &ctx) const
{
return write_padded(ctx,
[this, result](auto out) { return fmt::format_to(out, "{}", nested(to_string(result))); });
[this, result](auto out) { return v10::format_to(out, "{}", nested(to_string(result))); });
}
};
@ -248,14 +206,8 @@ struct fmt::formatter<eastl::fixed_string<TType, TCount, TOverflow>> : nested_fo
{
auto
// ReSharper disable once CppInconsistentNaming
format(eastl::fixed_string<TType, TCount, TOverflow> const &str, format_context &ctx) const
format(const eastl::fixed_string<TType, TCount, TOverflow> &str, format_context &ctx) const
{
return write_padded(ctx, [this, str](auto out) { return fmt::format_to(out, "{}", nested(str.c_str())); });
return write_padded(ctx, [this, str](auto out) { return v10::format_to(out, "{}", nested(str.c_str())); });
}
};
template <typename T>
using Ref = eastl::shared_ptr<T>;
template <typename T>
using WeakRef = eastl::weak_ptr<T>;

View File

@ -1,132 +1,137 @@
// =============================================
// Aster: image.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
struct StorageTexture;
struct Device;
[[nodiscard]] inline vk::Extent2D
ToExtent2D(vk::Extent3D const &extent)
ToExtent2D(const vk::Extent3D &extent)
{
return {extent.width, extent.height};
}
[[nodiscard]] inline vk::Extent3D
ToExtent3D(vk::Extent2D const &extent, u32 const depth)
ToExtent3D(const vk::Extent2D &extent, const u32 depth)
{
return {extent.width, extent.height, depth};
}
[[nodiscard]] inline vk::Offset2D
ToOffset2D(vk::Extent3D const &extent)
ToOffset2D(const vk::Extent3D &extent)
{
return {static_cast<i32>(extent.width), static_cast<i32>(extent.height)};
return {Cast<i32>(extent.width), Cast<i32>(extent.height)};
}
[[nodiscard]] inline vk::Offset3D
ToOffset3D(vk::Extent3D const &extent)
ToOffset3D(const vk::Extent3D &extent)
{
return {static_cast<i32>(extent.width), static_cast<i32>(extent.height), static_cast<i32>(extent.depth)};
return {Cast<i32>(extent.width), Cast<i32>(extent.height), Cast<i32>(extent.depth)};
}
struct Image
{
enum class FlagBits : u8
{
eSampled = 0x1,
eStorage = 0x2,
eCube = 0x4,
};
using Flags = vk::Flags<FlagBits>;
constexpr static Flags FLAGS = {};
Device const *m_Device = nullptr;
vk::Image m_Image = nullptr;
vk::ImageView m_View = nullptr;
VmaAllocation m_Allocation = nullptr;
vk::Extent3D m_Extent;
vk::Format m_Format;
// Image.m_MipLevels_ is used for bookkeeping
// If the image is Invalid, the remaining data in Image is used intrusively by `GpuResourceManager`.
u8 m_EmptyPadding_ = 0;
Flags m_Flags_ = {};
u8 m_Flags_ = 0;
u8 m_LayerCount = 0;
u8 m_MipLevels = 0;
[[nodiscard]] bool
IsValid() const
{
return m_Image;
}
[[nodiscard]] bool IsValid() const;
[[nodiscard]] bool IsOwned() const;
[[nodiscard]] u32 GetMipLevels() const;
[[nodiscard]] bool IsCommitted() const;
void SetCommitted(bool committed);
[[nodiscard]] u32
GetMipLevels() const
{
return m_MipLevels;
}
void Destroy(const Device *device);
void DestroyView(vk::ImageView imageView) const;
// Constructors.
explicit Image(Device const *device, vk::Image image, VmaAllocation allocation, vk::Extent3D extent,
vk::Format format, Flags flags, u8 layerCount, u8 mipLevels);
Image(Image &&other) noexcept;
Image &operator=(Image &&other) noexcept;
~Image();
DISALLOW_COPY_AND_ASSIGN(Image);
constexpr static u8 VALID_BIT = 1u << 7;
constexpr static u8 OWNED_BIT = 1u << 6;
constexpr static u8 COMMITTED_BIT = 1u << 5;
};
template <>
constexpr bool concepts::GpuResource<Image> = true;
struct Texture : Image
{
constexpr static Flags FLAGS = FlagBits::eSampled;
void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, bool isMipMapped, cstr name = nullptr);
};
struct ImageCube : Image
static_assert(sizeof(Texture) == sizeof(Image));
struct TextureCube : Texture
{
constexpr static Flags FLAGS = FlagBits::eCube;
void
Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isMipMapped = false, cstr name = nullptr);
};
struct TextureCube : Image
static_assert(sizeof(TextureCube) == sizeof(Image));
struct AttachmentImage : Image
{
constexpr static Flags FLAGS = Texture::FLAGS | ImageCube::FLAGS;
void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, cstr name = nullptr);
};
struct StorageImage : Image
static_assert(sizeof(AttachmentImage) == sizeof(Image));
struct DepthImage : Image
{
constexpr static Flags FLAGS = FlagBits::eStorage;
void Init(const Device *device, vk::Extent2D extent, cstr name = nullptr);
};
struct StorageTexture : StorageImage
static_assert(sizeof(DepthImage) == sizeof(Image));
struct StorageTexture : Texture
{
constexpr static Flags FLAGS = StorageImage::FLAGS | Texture::FLAGS;
void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, bool isSampled, cstr name = nullptr);
};
struct StorageTextureCube : StorageImage
static_assert(sizeof(StorageTexture) == sizeof(Image));
struct StorageTextureCube : StorageTexture
{
constexpr static Flags FLAGS = StorageImage::FLAGS | Texture::FLAGS | ImageCube::FLAGS;
void Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isSampled, bool isMipMapped = false,
cstr name = nullptr);
};
namespace concepts
static_assert(sizeof(StorageTextureCube) == sizeof(Image));
inline bool
Image::IsValid() const
{
return m_Flags_ & VALID_BIT;
}
template <typename T>
concept AnyImage = std::derived_from<T, Image>;
inline bool
Image::IsOwned() const
{
return m_Flags_ & OWNED_BIT;
}
template <typename T, typename TInto>
concept ImageInto = std::derived_from<T, Image> and std::derived_from<TInto, Image> and
(static_cast<bool>(T::FLAGS & TInto::FLAGS) or std::same_as<Image, TInto>);
inline u32
Image::GetMipLevels() const
{
return m_MipLevels;
}
template <typename T>
concept AnyImageRef = Deref<T> and AnyImage<DerefType<T>>;
inline bool
Image::IsCommitted() const
{
return m_Flags_ & COMMITTED_BIT;
}
template <typename T, typename TTo>
concept ImageRefTo = Deref<T> and ImageInto<DerefType<T>, TTo>;
} // namespace concepts
inline void
Image::SetCommitted(const bool committed)
{
m_Flags_ = committed ? (m_Flags_ | COMMITTED_BIT) : (m_Flags_ & ~COMMITTED_BIT);
}

View File

@ -1,108 +0,0 @@
// =============================================
// Aster: image_view.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
#include "image.h"
template <concepts::AnyImage TImage>
struct View
{
using ImageType = TImage;
Ref<Image> m_Image;
vk::ImageView m_View = nullptr;
vk::Extent3D m_Extent;
u8 m_BaseLayer = 0;
u8 m_LayerCount = 0;
u8 m_BaseMipLevel = 0;
u8 m_MipLevelCount = 0;
[[nodiscard]] vk::Image
GetImage() const
{
return m_Image->m_Image;
}
[[nodiscard]] bool
IsValid() const
{
return static_cast<bool>(m_Image);
}
View(Ref<Image> image, vk::ImageView const view, vk::Extent3D const extent, u8 const baseLayer, u8 const layerCount,
u8 const baseMipLevel, u8 const mipLevelCount)
: m_Image{std::move(image)}
, m_View{view}
, m_Extent{extent}
, m_BaseLayer{baseLayer}
, m_LayerCount{layerCount}
, m_BaseMipLevel{baseMipLevel}
, m_MipLevelCount{mipLevelCount}
{
}
View(View &&other) noexcept
: m_Image{std::move(other.m_Image)}
, m_View{Take(other.m_View)}
, m_Extent{std::move(other.m_Extent)}
, m_BaseLayer{other.m_BaseLayer}
, m_LayerCount{other.m_LayerCount}
, m_BaseMipLevel{other.m_BaseMipLevel}
, m_MipLevelCount{other.m_MipLevelCount}
{
}
View &
operator=(View &&other) noexcept
{
if (this == &other)
return *this;
using std::swap;
swap(m_Image, other.m_Image);
swap(m_View, other.m_View);
swap(m_Extent, other.m_Extent);
swap(m_BaseLayer, other.m_BaseLayer);
swap(m_LayerCount, other.m_LayerCount);
swap(m_BaseMipLevel, other.m_BaseMipLevel);
swap(m_MipLevelCount, other.m_MipLevelCount);
return *this;
}
DISALLOW_COPY_AND_ASSIGN(View);
~View()
{
if (!IsValid())
return;
m_Image->DestroyView(Take(m_View));
}
};
using ImageView = View<Image>;
using ImageCubeView = View<ImageCube>;
using TextureView = View<Texture>;
using TextureCubeView = View<TextureCube>;
using StorageImageView = View<StorageImage>;
using StorageTextureView = View<StorageTexture>;
using StorageTextureCubeView = View<StorageTextureCube>;
namespace concepts
{
template <typename T>
concept View = std::derived_from<T, View<typename T::ImageType>>;
template <typename T, typename TTo>
concept ViewTo = View<T> and ImageInto<typename T::ImageType, TTo>;
template <typename T>
concept ViewRef = Deref<T> and View<DerefType<T>>;
template <typename T, typename TTo>
concept ViewRefTo = ViewRef<T> and ImageInto<typename DerefType<T>::ImageType, TTo>;
} // namespace concepts

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: physical_device.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -8,12 +8,10 @@
#include "global.h"
#include "surface.h"
#include <sstream>
#include <EASTL/fixed_vector.h>
struct Window;
struct Instance;
struct Context;
enum class QueueSupportFlagBits
{
@ -25,31 +23,6 @@ enum class QueueSupportFlagBits
using QueueSupportFlags = vk::Flags<QueueSupportFlagBits>;
inline std::string
// ReSharper disable once CppInconsistentNaming
format_as(QueueSupportFlags const &qfi)
{
std::stringstream sb;
if (qfi & QueueSupportFlagBits::eGraphics)
{
sb << "Graphics | ";
}
if (qfi & QueueSupportFlagBits::eTransfer)
{
sb << "Transfer | ";
}
if (qfi & QueueSupportFlagBits::eCompute)
{
sb << "Compute | ";
}
if (qfi & QueueSupportFlagBits::ePresent)
{
sb << "Present | ";
}
auto const sbv = sb.view();
return std::string(sbv.substr(0, sbv.size() - 3));
}
struct QueueFamilyInfo
{
u32 m_Index;
@ -57,12 +30,6 @@ struct QueueFamilyInfo
QueueSupportFlags m_Support;
};
inline std::string
format_as(QueueFamilyInfo const &qfi)
{
return fmt::format("Queue {}: Count={} Support={}", qfi.m_Index, qfi.m_Count, qfi.m_Support);
}
[[nodiscard]] vk::SurfaceCapabilitiesKHR
GetSurfaceCapabilities(vk::PhysicalDevice physicalDevice, vk::SurfaceKHR surface);
@ -81,12 +48,11 @@ struct PhysicalDevice final
eastl::vector<vk::PresentModeKHR> m_PresentModes;
eastl::vector<QueueFamilyInfo> m_QueueFamilies;
PhysicalDevice() = default;
PhysicalDevice(vk::SurfaceKHR surface, vk::PhysicalDevice physicalDevice);
};
class PhysicalDevices : public eastl::fixed_vector<PhysicalDevice, 4>
{
public:
PhysicalDevices(Surface const &surface, Instance const &context);
PhysicalDevices(const Surface *surface, const Context *context);
};

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: pipeline.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -13,45 +13,12 @@ struct Device;
struct Pipeline
{
enum class Kind
{
eGraphics,
eCompute,
};
Device const *m_Device = nullptr;
const Device *m_Device;
vk::PipelineLayout m_Layout;
vk::Pipeline m_Pipeline = nullptr;
vk::Pipeline m_Pipeline;
eastl::vector<vk::DescriptorSetLayout> m_SetLayouts;
Kind m_Kind;
Pipeline() = default;
Pipeline(Device const *device, vk::PipelineLayout layout, vk::Pipeline pipeline,
eastl::vector<vk::DescriptorSetLayout> &&setLayouts, Kind kind);
Pipeline(const Device *device, vk::PipelineLayout layout, vk::Pipeline pipeline,
eastl::vector<vk::DescriptorSetLayout> &&setLayouts);
~Pipeline();
DISALLOW_COPY_AND_ASSIGN(Pipeline);
Pipeline(Pipeline &&other) noexcept
: m_Device{other.m_Device}
, m_Layout{Take(other.m_Layout)}
, m_Pipeline{Take(other.m_Pipeline)}
, m_SetLayouts{std::move(other.m_SetLayouts)}
, m_Kind{other.m_Kind}
{
}
Pipeline &
operator=(Pipeline &&other) noexcept
{
if (this == &other)
return *this;
using eastl::swap;
swap(m_Device, other.m_Device);
swap(m_Layout, other.m_Layout);
swap(m_Pipeline, other.m_Pipeline);
swap(m_SetLayouts, other.m_SetLayouts);
swap(m_Kind, other.m_Kind);
return *this;
}
};

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: queue_allocation.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once

View File

@ -1,32 +0,0 @@
// =============================================
// Aster: sampler.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
struct Device;
struct Sampler final
{
Device const *m_Device = nullptr;
vk::Sampler m_Sampler = nullptr;
[[nodiscard]] bool
IsValid() const
{
return m_Sampler;
}
// Constructors
Sampler(Device const *device, vk::SamplerCreateInfo const &samplerCreateInfo, cstr name);
~Sampler();
Sampler(Sampler &&other) noexcept;
Sampler &operator=(Sampler &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(Sampler);
};

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: size.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -9,33 +9,10 @@
struct Size2D
{
u32 m_Width = 0;
u32 m_Height = 0;
Size2D() = default;
Size2D(u32 const width, u32 const height)
: m_Width{width}
, m_Height{height}
{
}
Size2D(vk::Extent2D const extent)
: m_Width{extent.width}
, m_Height{extent.height}
{
}
Size2D &
operator=(vk::Extent2D const other)
{
m_Height = other.height;
m_Width = other.width;
return *this;
}
bool operator==(Size2D const &) const = default;
u32 m_Width;
u32 m_Height;
explicit
operator vk::Extent2D() const
{
return {m_Width, m_Height};

View File

@ -1,24 +1,23 @@
// =============================================
// Aster: surface.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "global.h"
struct Instance;
struct Context;
struct Window;
struct Surface
{
Instance *m_Context;
Context *m_Context;
vk::SurfaceKHR m_Surface;
NameString m_Name;
// Ctor Dtor
Surface() = default;
Surface(Instance &context, Window const &window);
Surface(Context *context, const Window *window, cstr name);
~Surface();
// Move

View File

@ -1,6 +1,6 @@
/// =============================================
// Aster: swapchain.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// ==============================================
#pragma once
@ -19,8 +19,9 @@ struct Swapchain final
{
using FnResizeCallback = eastl::function<void(vk::Extent2D)>;
Device const *m_Device;
const Device *m_Device;
vk::SwapchainKHR m_Swapchain;
NameString m_Name;
vk::Extent2D m_Extent;
vk::Format m_Format;
eastl::fixed_vector<vk::Image, 4> m_Images;
@ -28,12 +29,11 @@ struct Swapchain final
eastl::vector<FnResizeCallback> m_ResizeCallbacks;
void Create(Surface const &surface, Size2D size);
void Create(const Surface *window, Size2D size);
void RegisterResizeCallback(FnResizeCallback &&callback);
// Ctor/Dtor
Swapchain() = default;
Swapchain(Surface const &surface, Device const &device, Size2D size);
Swapchain(const Surface *window, const Device *device, Size2D size, NameString &&name);
~Swapchain();
// Move
@ -42,6 +42,6 @@ struct Swapchain final
DISALLOW_COPY_AND_ASSIGN(Swapchain);
private:
private:
void Cleanup();
};

View File

@ -1,28 +1,36 @@
// =============================================
// Aster: type_traits.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "constants.h"
struct Device;
struct Image;
namespace concepts
{
template <typename T>
concept Deref = requires(T a) {
{ *a };
concept DeviceDestructible = requires(T a, Device *p) {
{ a.Destroy(p) } -> std::convertible_to<void>;
};
template <typename TRef, typename TVal>
concept DerefTo = requires(TRef a) {
{ *a } -> std::convertible_to<TVal>;
template <typename T>
concept Committable = requires(T a, bool v) {
{ a.IsCommitted() } -> std::convertible_to<bool>;
{ a.SetCommitted(v) } -> std::convertible_to<void>;
};
template <Deref T>
using DerefType = std::remove_cvref_t<decltype(*std::declval<T>())>;
template <typename T>
constexpr bool GpuResource = false;
template <typename T>
concept RenderResource = GpuResource<T> and std::is_default_constructible_v<T> and std::is_trivially_copyable_v<T> and
DeviceDestructible<T> and Committable<T>;
template <typename T>
constexpr bool IsHandle = false;
template <typename THandle>
concept HandleType = IsHandle<THandle> and RenderResource<typename THandle::Type>;
} // namespace concepts

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: window.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -20,8 +20,6 @@ struct Window final
static std::atomic_uint64_t m_WindowCount;
static std::atomic_bool m_IsGlfwInit;
static void SetupLibrary();
static cstr *GetInstanceExtensions(u32 *extensionCount);
// Methods
[[nodiscard]] bool
@ -32,7 +30,7 @@ struct Window final
}
void RequestExit() const noexcept;
void SetWindowSize(vk::Extent2D const &extent) const noexcept;
void SetWindowSize(const vk::Extent2D &extent) const noexcept;
void SetWindowSize(u32 width, u32 height) const noexcept;
/// Actual size of the framebuffer being used for the window render.
[[nodiscard]] Size2D GetSize() const;

View File

@ -4,7 +4,7 @@ cmake_minimum_required(VERSION 3.13)
target_sources(aster_core
INTERFACE
"rendering_device.h"
"resource.h"
"context.h"
"commit_manager.h")
"manager.h"
"buffer_manager.h"
"image_manager.h"
"render_resource_manager.h")

View File

@ -0,0 +1,24 @@
// =============================================
// Aster: buffer_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/buffer.h"
#include "manager.h"
namespace systems
{
using BufferHandle = Handle<Buffer>;
class BufferManager final : public Manager<Buffer>
{
public:
BufferManager(const Device *device, const u32 maxCount, const u8 binding);
[[nodiscard]] Handle CreateStorageBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Handle CreateUniformBuffer(usize size, cstr name = nullptr);
};
} // namespace systems

View File

@ -1,375 +0,0 @@
// =============================================
// Aster: render_resource_manager.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/util/freelist.h"
#include "EASTL/deque.h"
#include "EASTL/intrusive_hash_map.h"
#include "EASTL/vector.h"
#include "aster/core/buffer.h"
#include "aster/core/image_view.h"
#include "aster/core/sampler.h"
#include "resource.h"
namespace systems
{
class RenderingDevice;
class CommitManager
{
template <typename T>
struct HandleMapper
{
using Type = T;
using Handle = Ref<Type>;
using Resource = ResId<Type>;
struct Entry : eastl::intrusive_hash_node_key<Handle>
{
std::atomic<u32> m_CommitCount;
void
AddRef()
{
auto const rc = ++m_CommitCount;
assert(rc > 0);
}
void
Release()
{
auto const rc = --m_CommitCount;
assert(rc < MaxValue<u32>);
}
u32
IsReferenced() const
{
return m_CommitCount;
}
bool
operator==(Entry const &other) const
{
return this->mKey == other.mKey;
}
Entry *
Next()
{
return reinterpret_cast<Entry *>(this->mpNext);
}
void
SetNext(Entry &entry)
{
this->mpNext = &entry;
}
struct Hash
{
usize
operator()(Handle const &e)
{
return eastl::hash<Type *>()(e.get());
}
};
};
eastl::vector<Entry> m_Data;
FreeList<Entry> m_FreeList;
eastl::intrusive_hash_map<typename Entry::key_type, Entry, 31, typename Entry::Hash> m_InUse;
std::array<FreeList<Entry>, 4> m_ToDelete;
u8 m_ToDeleteIndex = 0;
explicit HandleMapper(u32 const maxCount)
: m_Data{maxCount}
{
// Setup freelist
for (auto it = m_Data.rbegin(); it != m_Data.rend(); ++it)
{
m_FreeList.Push(*it);
}
}
~HandleMapper()
{
for (auto &toDelete : m_ToDelete)
{
ClearEntries(toDelete);
}
}
PIN_MEMORY(HandleMapper);
/// Returns a commit, and a bool signifying if it is a new commit.
std::tuple<Resource, bool>
Create(Handle const &object)
{
// Get-from freelist
assert(!m_FreeList.Empty());
auto it = m_InUse.find(object);
if (it != m_InUse.end())
{
it->AddRef();
auto i = GetIndex(*it);
return {Resource{i}, false};
}
Entry &data = m_FreeList.Pop();
data.mKey = object;
data.m_CommitCount = 0;
m_InUse.insert(data);
auto i = GetIndex(data);
return {Resource{i}, true};
}
Handle
GetHandle(Resource const &res)
{
return m_Data[res.m_Index].mKey;
}
void
AddRef(Resource const &commit)
{
m_Data.at(commit.m_Index).AddRef();
}
void
Release(Resource const &commit)
{
auto &entry = m_Data.at(commit.m_Index);
entry.Release();
if (!entry.IsReferenced())
{
QueueDelete(entry);
}
}
/**
* Sweeps through the delete queue.
* All freed items are cleared. (With a 3 frame delay)
*/
void
Update()
{
m_ToDeleteIndex = (m_ToDeleteIndex + 1) % m_ToDelete.size();
auto &list = m_ToDelete[m_ToDeleteIndex];
ClearEntries(list);
}
private:
u32
GetIndex(Entry const &entry)
{
return static_cast<u32>(&entry - m_Data.begin());
}
void
QueueDelete(Entry &entry)
{
m_InUse.remove(entry);
m_ToDelete[m_ToDeleteIndex].Push(entry);
}
void
ClearEntries(FreeList<Entry> &entries)
{
while (!entries.Empty())
{
Entry &entry = entries.Pop();
entry.mKey.reset();
entry.m_CommitCount = 0;
}
}
};
union WriteInfo {
vk::DescriptorBufferInfo uBufferInfo;
vk::DescriptorImageInfo uImageInfo;
vk::BufferView uBufferView;
explicit WriteInfo(vk::DescriptorBufferInfo const &info);
explicit WriteInfo(vk::DescriptorImageInfo const &info);
explicit WriteInfo(vk::BufferView const &info);
};
using WriteCommand = vk::WriteDescriptorSet;
// using WriteOwner = std::variant<Handle<Buffer>, Handle<Image>>;
public:
RenderingDevice const *m_Device;
CommitManager(RenderingDevice const *device, u32 maxBuffers, u32 maxImages, u32 maxStorageImages,
Ref<Sampler> defaultSampler);
~CommitManager();
PIN_MEMORY(CommitManager);
// Commit Buffer
private:
ResId<Buffer> CommitBuffer(Ref<Buffer> const &buffer);
public:
// Commit Storage Images
ResId<StorageImageView>
CommitStorageImage(concepts::ViewRefTo<StorageImage> auto const &image)
{
return CommitStorageImage(CastView<StorageImageView>(image));
}
ResId<StorageImageView> CommitStorageImage(Ref<StorageImageView> const &image);
// Sampled Images
ResId<TextureView>
CommitTexture(concepts::ViewRefTo<Texture> auto const &image, Ref<Sampler> const &sampler)
{
return CommitTexture(CastView<TextureView>(image), sampler);
}
ResId<TextureView>
CommitTexture(concepts::ViewRefTo<Texture> auto const &image)
{
return CommitTexture(CastView<TextureView>(image));
}
ResId<TextureView> CommitTexture(Ref<TextureView> const &handle);
ResId<TextureView> CommitTexture(Ref<TextureView> const &image, Ref<Sampler> const &sampler);
void Update();
Ref<Buffer>
FetchHandle(ResId<Buffer> const &id)
{
return m_Buffers.GetHandle(id);
}
Ref<TextureView>
FetchHandle(ResId<TextureView> const &id)
{
return m_Images.GetHandle(id);
}
Ref<StorageImageView>
FetchHandle(ResId<StorageImageView> const &id)
{
return m_StorageImages.GetHandle(id);
}
[[nodiscard]] vk::DescriptorSetLayout const &
GetDescriptorSetLayout() const
{
return m_SetLayout;
}
[[nodiscard]] vk::DescriptorSet const &
GetDescriptorSet() const
{
return m_DescriptorSet;
}
static CommitManager &
Instance()
{
assert(m_Instance);
return *m_Instance;
}
static bool
IsInit()
{
return static_cast<bool>(m_Instance);
}
private:
vk::DescriptorPool m_DescriptorPool;
vk::DescriptorSetLayout m_SetLayout;
vk::DescriptorSet m_DescriptorSet;
constexpr static u8 BUFFER_BINDING_INDEX = 0x0;
constexpr static u8 IMAGE_BINDING_INDEX = 0x1;
constexpr static u8 STORAGE_IMAGE_BINDING_INDEX = 0x2;
HandleMapper<Buffer> m_Buffers;
HandleMapper<TextureView> m_Images;
HandleMapper<StorageImageView> m_StorageImages;
Ref<Sampler> m_DefaultSampler;
eastl::vector<vk::WriteDescriptorSet> m_Writes;
eastl::deque<WriteInfo> m_WriteInfos;
// eastl::vector<WriteOwner> m_WriteOwner;
static CommitManager *m_Instance;
friend ResId<Buffer>;
friend ResId<TextureView>;
friend ResId<StorageImageView>;
void
AddRef(ResId<Buffer> const &handle)
{
m_Buffers.AddRef(handle);
}
void
AddRef(ResId<TextureView> const &handle)
{
m_Images.AddRef(handle);
}
void
AddRef(ResId<StorageImageView> const &handle)
{
m_StorageImages.AddRef(handle);
}
void
Release(ResId<Buffer> const &handle)
{
m_Buffers.Release(handle);
}
void
Release(ResId<TextureView> const &handle)
{
m_Images.Release(handle);
}
void
Release(ResId<StorageImageView> const &handle)
{
m_StorageImages.Release(handle);
}
};
template <typename T>
void
ResId<T>::AddRef() const
{
if (m_Index != INVALID)
CommitManager::Instance().AddRef(*this);
}
template <typename T>
void
ResId<T>::Release() const
{
if (m_Index != INVALID)
CommitManager::Instance().Release(*this);
}
} // namespace systems

View File

@ -1,471 +0,0 @@
// =============================================
// Aster: context_pool.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "EASTL/span.h"
#include "context.h"
#include <aster/aster.h>
#include <aster/core/buffer.h>
#include <aster/core/image.h>
#include <aster/core/image_view.h>
#include <aster/core/physical_device.h>
#include <aster/core/pipeline.h>
#include <EASTL/intrusive_list.h>
#include <EASTL/optional.h>
#include <EASTL/vector.h>
#include <foonathan/memory/memory_pool.hpp>
#include <foonathan/memory/namespace_alias.hpp>
namespace systems
{
class RenderingDevice;
struct Frame;
namespace _internal
{
class ComputeContextPool;
class GraphicsContextPool;
class TransferContextPool;
class ContextPool;
} // namespace _internal
#define DEPRECATE_RAW_CALLS
class Context
{
protected:
_internal::ContextPool *m_Pool;
vk::CommandBuffer m_Cmd;
friend RenderingDevice;
friend _internal::ContextPool;
explicit Context(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
: m_Pool{&pool}
, m_Cmd{cmd}
{
}
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<Buffer> const &buffer);
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<Image> const &image);
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<ImageView> const &view);
public:
DEPRECATE_RAW_CALLS void Dependency(vk::DependencyInfo const &dependencyInfo);
void Begin();
void End();
void BeginDebugRegion(cstr name, vec4 color = {});
void EndDebugRegion();
};
// Inline the no-op if not debug.
#if defined(ASTER_NDEBUG)
inline void
Context::BeginDebugRegion(cstr name, vec4 color)
{
}
inline void
Context::EndDebugRegion()
{
}
#endif
class TransferContext : public Context
{
protected:
friend RenderingDevice;
friend _internal::TransferContextPool;
explicit TransferContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
: Context{pool, cmd}
{
}
void UploadBuffer(Ref<Buffer> const &buffer, usize size, void const *data);
public:
void UploadTexture(Ref<Image> const &image, eastl::span<u8> const &data);
void
UploadBuffer(Ref<Buffer> const &buffer, std::ranges::range auto const &data)
{
auto const span = eastl::span{data.begin(), data.end()};
UploadBuffer(buffer, span.size_bytes(), span.data());
}
DEPRECATE_RAW_CALLS void Blit(vk::BlitImageInfo2 const &mipBlitInfo);
TransferContext(TransferContext &&other) noexcept;
TransferContext &operator=(TransferContext &&other) noexcept;
~TransferContext() = default;
DISALLOW_COPY_AND_ASSIGN(TransferContext);
};
class ComputeContext : public TransferContext
{
protected:
friend RenderingDevice;
friend _internal::ComputeContextPool;
Pipeline const *m_PipelineInUse;
explicit ComputeContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
: TransferContext{pool, cmd}
, m_PipelineInUse{nullptr}
{
}
void PushConstantBlock(usize offset, usize size, void const *data);
void Dispatch(Pipeline const &pipeline, u32 x, u32 y, u32 z, usize size, void *data);
public:
void BindPipeline(Pipeline const &pipeline);
void
PushConstantBlock(auto const &block)
{
if constexpr (sizeof block > 128)
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof block);
PushConstantBlock(0, sizeof block, &block);
}
void
PushConstantBlock(usize const offset, auto const &block)
{
if (offset + sizeof block > 128)
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}, at offset {}", sizeof block,
offset);
PushConstantBlock(offset, sizeof block, &block);
}
void
Dispatch(Pipeline const &pipeline, u32 const x, u32 const y, u32 const z, auto &pushConstantBlock)
{
if constexpr (sizeof pushConstantBlock > 128)
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof pushConstantBlock);
Dispatch(pipeline, x, y, z, sizeof pushConstantBlock, &pushConstantBlock);
}
};
class GraphicsContext : public ComputeContext
{
protected:
friend RenderingDevice;
friend _internal::GraphicsContextPool;
explicit GraphicsContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
: ComputeContext{pool, cmd}
{
}
public:
DEPRECATE_RAW_CALLS void SetViewport(vk::Viewport const &viewport);
void BindVertexBuffer(Ref<VertexBuffer> const &vertexBuffer);
void BindIndexBuffer(Ref<IndexBuffer> const &indexBuffer);
void Draw(usize vertexCount);
void DrawIndexed(usize indexCount);
void DrawIndexed(usize indexCount, usize firstIndex, usize firstVertex);
DEPRECATE_RAW_CALLS void BeginRendering(vk::RenderingInfo const &renderingInfo);
void EndRendering();
DEPRECATE_RAW_CALLS vk::CommandBuffer
GetCommandBuffer() const
{
return m_Cmd;
}
};
namespace _internal
{
class ContextPool
{
protected:
RenderingDevice *m_Device;
vk::CommandPool m_Pool;
eastl::vector<vk::CommandBuffer> m_CommandBuffers;
u32 m_BuffersAllocated;
public:
u16 m_ExtraData;
enum class ManagedBy : u8
{
eFrame,
eDevice,
} m_ManagedBy;
protected:
eastl::vector<Ref<Buffer>> m_OwnedBuffers;
eastl::vector<Ref<Image>> m_OwnedImages;
eastl::vector<Ref<ImageView>> m_OwnedImageViews;
vk::CommandBuffer AllocateCommandBuffer();
public:
[[nodiscard]] RenderingDevice &
GetDevice() const
{
assert(m_Device);
return *m_Device;
}
eastl::function<void(ContextPool &)> m_ResetCallback;
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<Buffer> const &buffer);
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<Image> const &image);
/// Keep the resource alive while the command buffers are acting.
void KeepAlive(Ref<ImageView> const &view);
Context CreateContext();
void Reset();
ContextPool() = default;
ContextPool(RenderingDevice &device, u32 queueFamilyIndex, ManagedBy managedBy);
ContextPool(ContextPool &&other) noexcept;
ContextPool &operator=(ContextPool &&other) noexcept;
bool
operator==(ContextPool const &other) const
{
return m_Pool == other.m_Pool;
}
~ContextPool();
DISALLOW_COPY_AND_ASSIGN(ContextPool);
};
class TransferContextPool : public ContextPool
{
public:
TransferContext CreateTransferContext();
TransferContextPool() = default;
TransferContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
: ContextPool{device, queueFamilyIndex, managedBy}
{
}
TransferContextPool(TransferContextPool &&other) noexcept = default;
TransferContextPool &operator=(TransferContextPool &&other) noexcept = default;
~TransferContextPool() = default;
DISALLOW_COPY_AND_ASSIGN(TransferContextPool);
};
class ComputeContextPool : public TransferContextPool
{
public:
ComputeContext CreateComputeContext();
ComputeContextPool() = default;
ComputeContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
: TransferContextPool{device, queueFamilyIndex, managedBy}
{
}
ComputeContextPool(ComputeContextPool &&other) noexcept = default;
ComputeContextPool &operator=(ComputeContextPool &&other) noexcept = default;
~ComputeContextPool() = default;
DISALLOW_COPY_AND_ASSIGN(ComputeContextPool);
};
class GraphicsContextPool : public ComputeContextPool
{
public:
GraphicsContext CreateGraphicsContext();
GraphicsContextPool() = default;
GraphicsContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
: ComputeContextPool{device, queueFamilyIndex, managedBy}
{
}
GraphicsContextPool(GraphicsContextPool &&other) noexcept = default;
GraphicsContextPool &operator=(GraphicsContextPool &&other) noexcept = default;
~GraphicsContextPool() = default;
DISALLOW_COPY_AND_ASSIGN(GraphicsContextPool);
};
template <std::derived_from<ContextPool> TContextPool>
class OrderlessContextPool
{
using ContextPoolType = TContextPool;
struct ContextListEntry : eastl::intrusive_list_node
{
ContextPoolType m_Pool;
bool
Contains(ContextPool const &other) const
{
return m_Pool == other;
}
};
using ContextListType = eastl::intrusive_list<ContextListEntry>;
RenderingDevice *m_Device;
memory::memory_pool<> m_ContextPoolEntryMemory;
ContextListType m_FreeContextPools;
ContextListType m_UsedContextPools;
u32 m_QueueFamilyIndex;
constexpr static usize ENTRY_SIZE = sizeof(ContextListEntry);
constexpr static usize ENTRIES_PER_BLOCK = 5;
constexpr static usize BLOCK_SIZE = ENTRIES_PER_BLOCK * ENTRY_SIZE;
public:
OrderlessContextPool()
: m_Device{nullptr}
, m_ContextPoolEntryMemory{ENTRY_SIZE, BLOCK_SIZE}
, m_QueueFamilyIndex{0}
{
}
void
Init(RenderingDevice &device, u32 const queueFamilyIndex)
{
m_Device = &device;
m_QueueFamilyIndex = queueFamilyIndex;
}
TransferContext
CreateTransferContext()
requires std::derived_from<TContextPool, TransferContextPool>
{
if (!m_FreeContextPools.empty())
{
ContextListEntry &entry = m_FreeContextPools.back();
m_FreeContextPools.pop_back();
m_UsedContextPools.push_back(entry);
return entry.m_Pool.CreateTransferContext();
}
ContextListEntry &entry = *static_cast<ContextListEntry *>(m_ContextPoolEntryMemory.allocate_node());
auto pool = ContextPoolType{*m_Device, m_QueueFamilyIndex, ContextPool::ManagedBy::eDevice};
pool.m_ResetCallback = [this](ContextPool &resetPool) { this->ReleasePool(resetPool); };
new (&entry) ContextListEntry{
.m_Pool = eastl::move(pool),
};
m_UsedContextPools.push_back(entry);
return entry.m_Pool.CreateTransferContext();
}
ComputeContext
CreateComputeContext()
requires std::derived_from<TContextPool, ComputeContextPool>
{
if (!m_FreeContextPools.empty())
{
ContextListEntry &entry = m_FreeContextPools.back();
m_FreeContextPools.pop_back();
m_UsedContextPools.push_back(entry);
return entry.m_Pool.CreateComputeContext();
}
ContextListEntry &entry = *static_cast<ContextListEntry *>(m_ContextPoolEntryMemory.allocate_node());
auto pool = ContextPoolType{*m_Device, m_QueueFamilyIndex, ContextPool::ManagedBy::eDevice};
pool.m_ResetCallback = [this](ContextPool &resetPool) { this->ReleasePool(resetPool); };
new (&entry) ContextListEntry{
.m_Pool = eastl::move(pool),
};
m_UsedContextPools.push_back(entry);
return entry.m_Pool.CreateComputeContext();
}
void
ReleasePool(ContextPool &pool)
{
auto const found = eastl::find_if(m_UsedContextPools.begin(), m_UsedContextPools.end(),
[&pool](ContextListEntry const &v) { return v.Contains(pool); });
auto &v = *found;
ContextListType::remove(v);
pool.Reset();
m_FreeContextPools.push_back(v);
}
OrderlessContextPool(OrderlessContextPool &&other) noexcept
: m_Device{other.m_Device}
, m_ContextPoolEntryMemory{std::move(other.m_ContextPoolEntryMemory)}
, m_FreeContextPools{other.m_FreeContextPools}
, m_UsedContextPools{other.m_UsedContextPools}
, m_QueueFamilyIndex{other.m_QueueFamilyIndex}
{
other.m_FreeContextPools.clear();
other.m_UsedContextPools.clear();
}
OrderlessContextPool &
operator=(OrderlessContextPool &&other) noexcept
{
if (this == &other)
return *this;
m_Device = other.m_Device;
m_ContextPoolEntryMemory = std::move(other.m_ContextPoolEntryMemory);
m_FreeContextPools = other.m_FreeContextPools;
other.m_FreeContextPools.clear();
m_UsedContextPools = other.m_UsedContextPools;
other.m_UsedContextPools.clear();
m_QueueFamilyIndex = other.m_QueueFamilyIndex;
return *this;
}
~OrderlessContextPool()
{
for (auto &entry : m_FreeContextPools)
{
entry.m_Pool.~ContextPoolType();
}
for (auto &entry : m_UsedContextPools)
{
entry.m_Pool.~ContextPoolType();
}
// The allocations will 'wink' away.
}
DISALLOW_COPY_AND_ASSIGN(OrderlessContextPool);
};
using OrderlessTransferContextPool = OrderlessContextPool<TransferContextPool>;
using OrderlessComputeContextPool = OrderlessContextPool<ComputeContextPool>;
} // namespace _internal
} // namespace systems

View File

@ -0,0 +1,60 @@
// =============================================
// Aster: image_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/image.h"
#include "manager.h"
namespace systems
{
struct Texture2DCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
bool m_IsSampled = true;
bool m_IsMipMapped = false;
bool m_IsStorage = false;
};
struct TextureCubeCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
u32 m_Side = 0;
cstr m_Name = nullptr;
bool m_IsSampled = true;
bool m_IsMipMapped = false;
bool m_IsStorage = false;
};
struct AttachmentCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
};
struct DepthStencilImageCreateInfo
{
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr;
};
using ImageHandle = Handle<Image>;
class ImageManager final : public Manager<Image>
{
public:
ImageManager(const Device *device, const u32 maxCount, const u8 binding);
[[nodiscard]] Handle CreateTexture2D(const Texture2DCreateInfo &createInfo);
[[nodiscard]] Handle CreateTextureCube(const TextureCubeCreateInfo &createInfo);
[[nodiscard]] Handle CreateAttachment(const AttachmentCreateInfo &createInfo);
[[nodiscard]] Handle CreateDepthStencilImage(const DepthStencilImageCreateInfo &createInfo);
};
} // namespace systems

View File

@ -0,0 +1,361 @@
// =============================================
// Aster: manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/type_traits.h"
struct Device;
template <concepts::RenderResource T>
class Handle;
template <concepts::RenderResource T>
class Manager
{
friend Handle<T>;
public:
using Type = T;
using Handle = Handle<Type>;
static_assert(sizeof(Handle) == sizeof(u32));
constexpr static u32 MAX_HANDLES = Handle::INDEX_MASK + 1;
/**
* Constructor for the Manager class template.
* @param device Device with which resources are created.
* @param maxCount Max number of resources that can be created (maxCount <= Handle::INDEX_MASK)
* @param binding The shader binding at which this manager will bind its resources.
*/
explicit Manager(const Device *device, const u32 maxCount, const u8 binding)
: m_MaxCount{maxCount}
, m_Binding{binding}
, m_Device{device}
{
assert(!m_Instance);
assert(maxCount <= MAX_HANDLES);
m_Data = new Type[m_MaxCount];
m_RefCount = new std::atomic<u32>[m_MaxCount];
for (u32 i = 0; i < m_MaxCount; ++i)
{
*Recast<u32 *>(&m_Data[i]) = (i + 1);
}
m_Instance = this;
}
virtual ~Manager()
{
if (!m_Data)
return;
for (u32 i = 0; i < m_MaxCount; ++i)
{
m_Data[i].Destroy(m_Device);
}
delete[] m_Data;
delete[] m_RefCount;
m_Data = nullptr;
m_RefCount = nullptr;
m_MaxCount = 0;
m_FreeHead = 0;
m_Device = nullptr;
m_Instance = nullptr;
}
/**
* @warning only to be used internally.
* @return The only constructed instance of this manager.
*/
static Manager *
Instance()
{
assert(m_Instance);
return m_Instance;
}
PIN_MEMORY(Manager);
private:
Type *m_Data = nullptr; // Data also keeps the freelist during 'not use'.
std::atomic<u32> *m_RefCount = nullptr; // Associated reference count for each of the instances in Data.
u32 m_MaxCount = 0; // Max number of resources supported.
u32 m_FreeHead = 0;
u8 m_Binding = 0;
static Manager *m_Instance;
/**
* User is expected to type-check.
* @param index Actual index of the resource in the m_Data array. Not type checked.
*/
void
AddRef(const u32 index)
{
assert(index < m_MaxCount);
++m_RefCount[index];
}
/**
* User is expected to type-check.
* @param index Actual index of the resource in the m_Data array. Not type checked.
*/
void
Release(const u32 index)
{
assert(index < m_MaxCount);
const u32 rc = --m_RefCount[index];
assert(rc != MaxValue<u32>);
if (rc == 0)
{
// TODO: Don't destroy here. Separate out to a cleanup routine.
m_Data[index].Destroy(m_Device);
}
}
/**
* User is expected to type-check.
* @param index Actual index of the resource in the m_Data array. Not type checked.
* @return Pointer to the resource at the index.
*/
Type *
Fetch(const u32 index)
{
assert(index < m_MaxCount);
return &m_Data[index];
}
protected:
const Device *m_Device;
/**
* Internal Method to Allocate a resource on the manager.
* @return [Handle, Type*] Where Type* is available to initialize the resource.
*/
[[nodiscard]] std::pair<Handle, Type *>
Alloc()
{
ERROR_IF(m_FreeHead >= m_MaxCount, "Max buffers allocated.") THEN_ABORT(-1);
const auto index = m_FreeHead;
Type *pAlloc = &m_Data[index];
m_FreeHead = *Recast<u32 *>(pAlloc);
return {Handle{index, m_Binding}, pAlloc};
}
};
template <concepts::RenderResource T>
class Ref
{
public:
using Type = T;
using Handle = Handle<Type>;
using Manager = Manager<Type>;
protected:
Handle m_Handle;
Type *m_Pointer = nullptr;
friend Handle;
void
InitPtr()
{
m_Pointer = m_Handle.Fetch();
}
public:
Type *
Get()
{
assert(m_Pointer);
return m_Pointer;
}
const Type *
Get() const
{
assert(m_Pointer);
return m_Pointer;
}
Type *
operator->()
{
return Get();
}
const Type *
operator->() const
{
return Get();
}
Type &
operator*()
{
return *Get();
}
const Type &
operator*() const
{
return Get();
}
// The only constructor requires a valid construction.
explicit Ref(Handle &&handle)
: m_Handle{std::forward<Handle>(handle)}
{
InitPtr();
}
// The only constructor requires a valid construction.
explicit Ref(const Handle &&handle)
: m_Handle{handle}
{
InitPtr();
}
Ref(const Ref &other) = default;
Ref(Ref &&other) noexcept = default;
Ref &operator=(const Ref &other) = default;
Ref &operator=(Ref &&other) noexcept = default;
~Ref() = default;
};
class RawHandle
{
protected:
constexpr static u32 INVALID_HANDLE = MaxValue<u32>;
constexpr static u32 INDEX_MASK = 0x0FFFFFFF;
constexpr static u32 TYPE_MASK = ~INDEX_MASK;
constexpr static u32 TYPE_OFFSET = GetMaskOffset(TYPE_MASK);
u32 m_Internal = INVALID_HANDLE;
RawHandle(const u32 index, const u8 typeId)
: m_Internal{(index & INDEX_MASK) | (typeId & TYPE_MASK)}
{
}
explicit RawHandle(const u32 internal)
: m_Internal{internal}
{
}
public:
[[nodiscard]] bool
IsValid() const
{
return m_Internal != INVALID_HANDLE;
}
[[nodiscard]] u32
GetIndex() const
{
return m_Internal & INDEX_MASK;
}
[[nodiscard]] u32
GetType() const
{
return (m_Internal & TYPE_MASK) >> TYPE_OFFSET;
}
bool
operator==(const RawHandle &other) const
{
return m_Internal == other.m_Internal;
}
};
/**
 * Ref-counted, typed handle to a render resource owned by Manager<T>.
 * Copying a valid handle adds a reference; destroying one releases it.
 */
template <concepts::RenderResource T>
class Handle : public RawHandle
{
public:
    using Type = T;
    using Manager = Manager<Type>;

protected:
    // The only constructor requires a valid construction.
    Handle(const u32 index, const u8 typeId)
        : RawHandle{index, typeId}
    {
        AddRef();
    }

    friend Manager;
    friend Ref<T>;

public:
    Handle(const Handle &other)
        : RawHandle{other}
    {
        // FIX: only count valid handles; the source may be moved-from.
        if (m_Internal != INVALID_HANDLE)
        {
            AddRef();
        }
    }
    Handle(Handle &&other) noexcept
        // FIX: was `std::exchange(other.m_Internal, m_Internal)`, which reads
        // this->m_Internal before the base subobject is initialized (UB).
        // A moved-from handle must be left INVALID so its dtor skips Release.
        : RawHandle{std::exchange(other.m_Internal, INVALID_HANDLE)}
    {
    }
    // Consumes this handle and returns a pointer-caching Ref.
    [[nodiscard]] Ref<T>
    ToPointer()
    {
        return Ref{std::move(*this)};
    }
    // Resolves the raw resource pointer through the manager singleton.
    [[nodiscard]] Type *
    Fetch() const
    {
        return Manager::Instance()->Fetch(m_Internal);
    }
    Handle &
    operator=(const Handle &other)
    {
        if (this == &other)
            return *this;
        // FIX: release the reference currently held before overwriting it,
        // otherwise its refcount leaks.
        if (m_Internal != INVALID_HANDLE)
        {
            Release();
        }
        m_Internal = other.m_Internal;
        if (m_Internal != INVALID_HANDLE)
        {
            AddRef();
        }
        return *this;
    }
    Handle &
    operator=(Handle &&other) noexcept
    {
        if (this == &other)
            return *this;
        // A swap suffices: our previous value is released by other's dtor.
        std::swap(m_Internal, other.m_Internal);
        return *this;
    }
    ~Handle()
    {
        if (m_Internal != INVALID_HANDLE)
        {
            Release();
        }
    }

protected:
    void
    AddRef()
    {
        Manager::Instance()->AddRef(GetIndex());
    }
    void
    Release()
    {
        Manager::Instance()->Release(GetIndex());
    }
};

View File

@ -1,75 +0,0 @@
// =============================================
// Aster: pipeline_helpers.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include <aster/aster.h>
#include <EASTL/vector.h>
#include <slang.h>
#include <variant>
namespace systems
{
class RenderingDevice;
// Result wrapper for pipeline creation: holds nothing on success, or either
// a Vulkan result or a Slang result on failure.
struct PipelineCreationError
{
std::variant<std::monostate, vk::Result, SlangResult> m_Data;
std::string What();
i32 Value();
operator bool() const; // True when an error is held.
PipelineCreationError(vk::Result res);
PipelineCreationError(SlangResult res);
PipelineCreationError();
};
// Maps a Slang shader stage enum to the matching Vulkan stage flag.
vk::ShaderStageFlagBits SlangToVulkanShaderStage(SlangStage stage);
namespace _internal
{
// Builds a vk::PipelineLayout (descriptor set layouts + push-constant
// ranges) from Slang reflection data.
struct PipelineLayoutBuilder
{
RenderingDevice *m_Device;
eastl::vector<vk::DescriptorSetLayout> m_DescriptorSetLayouts;
eastl::vector<vk::PushConstantRange> m_PushConstants;
vk::ShaderStageFlags m_Stage;
explicit PipelineLayoutBuilder(RenderingDevice *device, vk::DescriptorSetLayout bindlessLayout = {});
[[nodiscard]] vk::PipelineLayout Build();
[[nodiscard]] vk::DescriptorSetLayout
CreateDescriptorSetLayout(vk::DescriptorSetLayoutCreateInfo const &createInfo) const;
void AddDescriptorSetForParameterBlock(slang::TypeLayoutReflection *layout);
void AddPushConstantRangeForConstantBuffer(slang::TypeLayoutReflection *layout);
void AddSubObjectRange(slang::TypeLayoutReflection *layout, i64 subObjectRangeIndex);
void AddSubObjectRanges(slang::TypeLayoutReflection *layout);
};
// Accumulates the bindings of a single descriptor set from Slang reflection,
// then registers the built layout with the parent PipelineLayoutBuilder.
struct DescriptorLayoutBuilder
{
PipelineLayoutBuilder *m_PipelineLayoutBuilder;
eastl::vector<vk::DescriptorSetLayoutBinding> m_LayoutBindings;
u32 m_SetIndex;
vk::ShaderStageFlags &Stage() const;
explicit DescriptorLayoutBuilder(PipelineLayoutBuilder *pipelineLayoutBuilder);
void AddGlobalScopeParameters(slang::ProgramLayout *layout);
void AddEntryPointParameters(slang::ProgramLayout *layout);
void AddEntryPointParameters(slang::EntryPointLayout *layout);
void AddAutomaticallyIntroducedUniformBuffer();
void AddRanges(slang::TypeLayoutReflection *layout);
void AddRangesForParamBlockElement(slang::TypeLayoutReflection *layout);
void AddDescriptorRange(slang::TypeLayoutReflection *layout, i64 relativeSetIndex, i64 rangeIndex);
void AddDescriptorRanges(slang::TypeLayoutReflection *layout);
void Build();
};
} // namespace _internal
} // namespace systems

View File

@ -0,0 +1,157 @@
// =============================================
// Aster: render_resource_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "buffer_manager.h"
#include "image_manager.h"
#include "EASTL/deque.h"
#include "EASTL/vector.h"
#include <new>     // placement new for WriteOwner copy/move
#include <utility> // std::move
namespace systems
{
class RenderResourceManager
{
private:
union WriteInfo {
vk::DescriptorBufferInfo uBufferInfo;
vk::DescriptorImageInfo uImageInfo;
vk::BufferView uBufferView;
explicit WriteInfo(const vk::DescriptorBufferInfo &info);
explicit WriteInfo(const vk::DescriptorImageInfo &info);
explicit WriteInfo(const vk::BufferView &info);
};
using WriteCommand = vk::WriteDescriptorSet;
union WriteOwner {
Handle<Buffer> uBufferHandle;
Handle<Image> uImageHandle;
explicit WriteOwner(const Handle<Buffer> &handle);
explicit WriteOwner(const Handle<Image> &handle);
WriteOwner(const WriteOwner &other)
{
switch (uRawHandle.GetType())
{
case BUFFER_BINDING_INDEX:
uBufferHandle = other.uBufferHandle;
break;
case IMAGE_BINDING_INDEX:
uImageHandle = other.uImageHandle;
break;
default:
ERROR("Invalid Handle type.") THEN_ABORT(-1);
}
}
WriteOwner(WriteOwner &&other) noexcept
{
switch (uRawHandle.GetType())
{
case BUFFER_BINDING_INDEX:
uBufferHandle = std::move(other.uBufferHandle);
break;
case IMAGE_BINDING_INDEX:
uImageHandle = std::move(other.uImageHandle);
break;
default:
ERROR("Invalid Handle type.") THEN_ABORT(-1);
}
}
WriteOwner &
operator=(const WriteOwner &other)
{
if (this == &other)
return *this;
switch (uRawHandle.GetType())
{
case BUFFER_BINDING_INDEX:
uBufferHandle = other.uBufferHandle;
break;
case IMAGE_BINDING_INDEX:
uImageHandle = other.uImageHandle;
break;
default:
ERROR("Invalid Handle type.") THEN_ABORT(-1);
}
return *this;
}
WriteOwner &
operator=(WriteOwner &&other) noexcept
{
if (this == &other)
return *this;
switch (uRawHandle.GetType())
{
case BUFFER_BINDING_INDEX:
uBufferHandle = std::move(other.uBufferHandle);
break;
case IMAGE_BINDING_INDEX:
uImageHandle = std::move(other.uImageHandle);
break;
default:
ERROR("Invalid Handle type.") THEN_ABORT(-1);
}
return *this;
}
~WriteOwner()
{
switch (uRawHandle.GetType())
{
case BUFFER_BINDING_INDEX:
uBufferHandle.~Handle();
return;
case IMAGE_BINDING_INDEX:
uImageHandle.~Handle();
return;
default:
ERROR("Invalid Handle type.") THEN_ABORT(-1);
}
}
private:
RawHandle uRawHandle;
};
public:
RenderResourceManager(const Device *device, u32 maxBuffers, u32 maxImages);
void Commit(concepts::HandleType auto &handle);
private:
BufferManager m_BufferManager;
ImageManager m_ImageManager;
vk::DescriptorPool m_DescriptorPool;
vk::DescriptorSetLayout m_SetLayout;
vk::DescriptorSet m_DescriptorSet;
constexpr static u8 BUFFER_BINDING_INDEX = 0;
constexpr static u8 IMAGE_BINDING_INDEX = 1;
eastl::vector<vk::WriteDescriptorSet> m_Writes;
eastl::deque<WriteInfo> m_WriteInfos;
eastl::vector<WriteOwner> m_WriteOwner;
#if !defined(ASTER_NDEBUG)
usize m_CommitedBufferCount = 0;
usize m_CommitedTextureCount = 0;
usize m_CommitedStorageTextureCount = 0;
#endif
};
} // namespace systems

View File

@ -1,646 +0,0 @@
// =============================================
// Aster: rendering_device.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "context.h"
#include "pipeline_helpers.h"
#include "resource.h"
#include "aster/aster.h"
#include "aster/core/buffer.h"
#include "aster/core/device.h"
#include "aster/core/image.h"
#include "aster/core/image_view.h"
#include "aster/core/instance.h"
#include "aster/core/physical_device.h"
#include "aster/core/pipeline.h"
#include "aster/core/sampler.h"
#include "aster/core/size.h"
#include "aster/core/swapchain.h"
#include <EASTL/hash_map.h>
#include <EASTL/optional.h>
#include <EASTL/variant.h>
#include <slang-com-ptr.h>
#include <slang.h>
constexpr static u32 MAX_FRAMES_IN_FLIGHT = 3;
struct Window;
// Hash specialization so vk::SamplerCreateInfo can key the sampler cache.
// Float fields are quantized before hashing so nearly-identical samplers
// collapse to the same bucket.
template <>
struct eastl::hash<vk::SamplerCreateInfo>
{
    usize
    operator()(vk::SamplerCreateInfo const &createInfo) const noexcept
    {
        // Fold each field into the running seed in a fixed order.
        usize seed = HashAny(createInfo.flags);
        auto mix = [&seed](usize fieldHash) { seed = HashCombine(seed, fieldHash); };
        mix(HashAny(createInfo.magFilter));
        mix(HashAny(createInfo.minFilter));
        mix(HashAny(createInfo.mipmapMode));
        mix(HashAny(createInfo.addressModeU));
        mix(HashAny(createInfo.addressModeV));
        mix(HashAny(createInfo.addressModeW));
        mix(HashAny(static_cast<usize>(createInfo.mipLodBias * 1000))); // Resolution of 10^-3
        mix(HashAny(createInfo.anisotropyEnable));
        mix(HashAny(static_cast<usize>(createInfo.maxAnisotropy * 0x20))); // 32:1 Anisotropy is enough resolution
        mix(HashAny(createInfo.compareEnable));
        mix(HashAny(createInfo.compareOp));
        mix(HashAny(static_cast<usize>(createInfo.minLod * 1000))); // 0.001 resolution is enough.
        mix(HashAny(static_cast<usize>(createInfo.maxLod * 1000))); // 0.001 resolution is enough. (1 == NO Clamp)
        mix(HashAny(createInfo.borderColor));
        mix(HashAny(createInfo.unnormalizedCoordinates));
        return seed;
    }
};
namespace systems
{
// ====================================================================================================
#pragma region Creation Structs
// ====================================================================================================
// ----------------------------------------------------------------------------------------------------
#pragma region Image
// ----------------------------------------------------------------------------------------------------
// Parameters for creating a 2D texture (sampled and/or storage, optionally
// with a full mip chain).
struct Texture2DCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr; // Optional debug name.
bool m_IsSampled = true;
bool m_IsMipMapped = false;
bool m_IsStorage = false;
};
// Parameters for creating a cube texture; all six faces are m_Side x m_Side.
struct TextureCubeCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
u32 m_Side = 0; // Edge length in pixels of each cube face.
cstr m_Name = nullptr; // Optional debug name.
bool m_IsSampled = true;
bool m_IsMipMapped = false;
bool m_IsStorage = false;
};
// Parameters for creating a render-target (color attachment) image.
struct AttachmentCreateInfo
{
vk::Format m_Format = vk::Format::eUndefined;
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr; // Optional debug name.
};
// Parameters for creating a depth/stencil image; format is chosen by the device.
struct DepthStencilImageCreateInfo
{
vk::Extent2D m_Extent = {};
cstr m_Name = nullptr; // Optional debug name.
};
#pragma endregion
// ----------------------------------------------------------------------------------------------------
#pragma region View
// ----------------------------------------------------------------------------------------------------
// Parameters for creating an image view over an existing image. Mip level
// and layer counts default to "everything remaining after the base".
template <concepts::AnyImage TImage>
struct ViewCreateInfo
{
using ImageType = TImage;
Ref<ImageType> m_Image; // Image the view is created over; kept alive by the Ref.
cstr m_Name;
vk::ImageViewType m_ViewType = vk::ImageViewType::e2D;
vk::ComponentMapping m_Components = {};
vk::ImageAspectFlags m_AspectMask = {};
// nullopt means "all levels/layers from the base to the image's end".
eastl::optional<u8> m_MipLevelCount = eastl::nullopt;
eastl::optional<u8> m_LayerCount = eastl::nullopt;
u8 m_BaseMipLevel = 0;
u8 m_BaseLayer = 0;
// Explicit count if set, otherwise the image's remaining mip levels.
[[nodiscard]] u8
GetMipLevelCount() const
{
return m_MipLevelCount.value_or(m_Image->m_MipLevels - m_BaseMipLevel);
}
// Explicit count if set, otherwise the image's remaining array layers.
[[nodiscard]] u8
GetLayerCount() const
{
return m_LayerCount.value_or(m_Image->m_LayerCount - m_BaseLayer);
}
// Lowers to the Vulkan create info; format is taken from the image itself.
explicit
operator vk::ImageViewCreateInfo() const
{
return {
.image = m_Image->m_Image,
.viewType = m_ViewType,
.format = m_Image->m_Format,
.components = m_Components,
.subresourceRange =
{
.aspectMask = m_AspectMask,
.baseMipLevel = m_BaseMipLevel,
.levelCount = GetMipLevelCount(),
.baseArrayLayer = m_BaseLayer,
.layerCount = GetLayerCount(),
},
};
}
// Type-erases to the base Image create info (used by the non-template
// RenderingDevice::CreateView overload).
explicit
operator ViewCreateInfo<Image>() const
{
return {
.m_Image = CastImage<Image>(m_Image),
.m_Name = m_Name,
.m_ViewType = m_ViewType,
.m_Components = m_Components,
.m_AspectMask = m_AspectMask,
.m_MipLevelCount = m_MipLevelCount,
.m_LayerCount = m_LayerCount,
.m_BaseMipLevel = m_BaseMipLevel,
.m_BaseLayer = m_BaseLayer,
};
}
};
#pragma endregion
// ----------------------------------------------------------------------------------------------------
#pragma region Sampler
// ----------------------------------------------------------------------------------------------------
// Sampler parameters with engine-friendly defaults (trilinear, repeat,
// 16x anisotropy, no LOD clamp). Converts to vk::SamplerCreateInfo.
struct SamplerCreateInfo
{
cstr m_Name = nullptr; // Optional debug name; not part of the Vulkan struct.
vk::SamplerCreateFlags m_Flags = {};
vk::Filter m_MagFilter = vk::Filter::eLinear;
vk::Filter m_MinFilter = vk::Filter::eLinear;
vk::SamplerMipmapMode m_MipmapMode = vk::SamplerMipmapMode::eLinear;
vk::SamplerAddressMode m_AddressModeU = vk::SamplerAddressMode::eRepeat;
vk::SamplerAddressMode m_AddressModeV = vk::SamplerAddressMode::eRepeat;
vk::SamplerAddressMode m_AddressModeW = vk::SamplerAddressMode::eRepeat;
vk::BorderColor m_BorderColor = vk::BorderColor::eFloatOpaqueBlack;
vk::CompareOp m_CompareOp = vk::CompareOp::eNever;
f32 m_MipLodBias = 0.0f;
f32 m_MaxAnisotropy = 16.0f;
f32 m_MinLod = 0;
f32 m_MaxLod = VK_LOD_CLAMP_NONE;
bool m_AnisotropyEnable = true;
bool m_CompareEnable = false;
// Note: stored positively; inverted into `unnormalizedCoordinates` below.
bool m_NormalizedCoordinates = true;
explicit
operator vk::SamplerCreateInfo() const
{
return {
.flags = m_Flags,
.magFilter = m_MagFilter,
.minFilter = m_MinFilter,
.mipmapMode = m_MipmapMode,
.addressModeU = m_AddressModeU,
.addressModeV = m_AddressModeV,
.addressModeW = m_AddressModeW,
.mipLodBias = m_MipLodBias,
.anisotropyEnable = m_AnisotropyEnable,
.maxAnisotropy = m_MaxAnisotropy,
.compareEnable = m_CompareEnable,
.compareOp = m_CompareOp,
.minLod = m_MinLod,
.maxLod = m_MaxLod,
.borderColor = m_BorderColor,
.unnormalizedCoordinates = !m_NormalizedCoordinates,
};
}
};
#pragma endregion
// ----------------------------------------------------------------------------------------------------
#pragma region Pipeline
// ----------------------------------------------------------------------------------------------------
// One vertex attribute: shader location, byte offset within the vertex, and
// a restricted float format set mapped to Vulkan formats via GetFormat().
struct AttributeInfo
{
u32 m_Location;
u32 m_Offset;
enum class Format
{
eFloat32X4,
eFloat32X3,
eFloat32X2,
eFloat32,
} m_Format;
[[nodiscard]] vk::Format
GetFormat() const
{
switch (m_Format)
{
case Format::eFloat32X4:
return vk::Format::eR32G32B32A32Sfloat;
case Format::eFloat32X3:
return vk::Format::eR32G32B32Sfloat;
case Format::eFloat32X2:
return vk::Format::eR32G32Sfloat;
case Format::eFloat32:
return vk::Format::eR32Sfloat;
}
// Unreachable for valid enum values; keeps the compiler satisfied.
return vk::Format::eUndefined;
}
};
// One vertex buffer binding: its attributes, stride, and input rate.
struct VertexInput
{
eastl::vector<AttributeInfo> m_Attribute;
u32 m_Stride; // Byte stride between consecutive vertices/instances.
bool m_IsPerInstance; // True => per-instance input rate.
};
// Shader stages; values mirror the Vulkan stage bit flags so they can be
// combined and converted directly.
enum class ShaderType
{
eInvalid = 0,
eVertex = VK_SHADER_STAGE_VERTEX_BIT,
eTesselationControl = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
eTesselationEvaluation = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
eGeometry = VK_SHADER_STAGE_GEOMETRY_BIT,
eFragment = VK_SHADER_STAGE_FRAGMENT_BIT,
eCompute = VK_SHADER_STAGE_COMPUTE_BIT,
eTask = VK_SHADER_STAGE_TASK_BIT_EXT,
eMesh = VK_SHADER_STAGE_MESH_BIT_EXT,
eMax,
};
constexpr static u32 ShaderTypeCount = 8;
// Guards that eMax sits one past the highest single stage bit.
static_assert(static_cast<u32>(ShaderType::eMax) == 1 + (1 << (ShaderTypeCount - 1)));
// A shader module source file plus the entry points to compile from it.
struct ShaderInfo
{
std::string_view m_ShaderFile;
eastl::vector<std::string_view> m_EntryPoints;
};
// Parameters for building a graphics pipeline: vertex layout, shader set,
// and depth-test configuration.
struct GraphicsPipelineCreateInfo
{
enum class DepthTest
{
eEnabled,
eReadOnly, // Test but do not write depth.
eDisabled,
};
// Bit-composed compare op: bit 0 = less, bit 1 = equal, bit 2 = greater.
enum class CompareOp
{
eNever = 0x0,
eLessThan = 0x1,
eEqualTo = 0x2,
eGreaterThan = 0x4,
eLessThanOrEqualTo = eLessThan | eEqualTo,
eGreaterThanOrEqualTo = eGreaterThan | eEqualTo,
eNotEqualTo = eLessThan | eGreaterThan,
eAlways = eLessThan | eEqualTo | eGreaterThan,
};
eastl::fixed_vector<VertexInput, 4, false> m_VertexInputs;
eastl::fixed_vector<ShaderInfo, 4, false> m_Shaders;
DepthTest m_DepthTest = DepthTest::eEnabled;
CompareOp m_DepthOp = CompareOp::eLessThan;
cstr m_Name;
private:
friend RenderingDevice;
[[nodiscard]] vk::PipelineDepthStencilStateCreateInfo GetDepthStencilStateCreateInfo() const;
};
// Parameters for building a compute pipeline: a single shader + debug name.
struct ComputePipelineCreateInfo
{
ShaderInfo m_Shader;
cstr m_Name;
};
#pragma endregion
// ----------------------------------------------------------------------------------------------------
#pragma region Device
// ----------------------------------------------------------------------------------------------------
// Default GPU picker used when DeviceCreateInfo does not supply one.
PhysicalDevice DefaultPhysicalDeviceSelector(PhysicalDevices const &physicalDevices);
using PhysicalDeviceSelectorFn = PhysicalDevice (*)(PhysicalDevices const &);
static_assert(std::convertible_to<decltype(DefaultPhysicalDeviceSelector), PhysicalDeviceSelectorFn>);
// Parameters for constructing a RenderingDevice.
struct DeviceCreateInfo
{
std::reference_wrapper<Window> m_Window;
Features m_Features;
cstr m_AppName = "Aster App";
Version m_AppVersion = {0, 1, 0};
PhysicalDeviceSelectorFn m_PhysicalDeviceSelector = DefaultPhysicalDeviceSelector;
std::span<u8> m_PipelineCacheData = {}; // Optional cache blob from a previous run.
eastl::vector<cstr> m_ShaderSearchPaths;
bool m_UseBindless = true;
cstr m_Name = "Primary";
};
#pragma endregion
#pragma endregion
namespace _internal
{
class SyncServer;
}
// Opaque token for a submission; only the SyncServer can create or
// interpret it (used to wait on completed work).
class Receipt
{
void *m_Opaque;
explicit Receipt(void *opaque)
: m_Opaque{opaque}
{
}
friend _internal::SyncServer;
};
// Per-frame-in-flight state: command pools, sync primitives, and the
// swapchain image acquired for this frame.
struct Frame
{
// Persistent
RenderingDevice *m_Device;
// TODO: ThreadSafe
_internal::GraphicsContextPool m_PrimaryPool;
_internal::TransferContextPool m_AsyncTransferPool;
_internal::ComputeContextPool m_AsyncComputePool;
vk::Fence m_FrameAvailableFence;
vk::Semaphore m_ImageAcquireSem;
vk::Semaphore m_RenderFinishSem;
u32 m_FrameIdx;
// Transient
vk::Image m_SwapchainImage;
vk::ImageView m_SwapchainImageView;
Size2D m_SwapchainSize;
u32 m_ImageIdx;
// Rebinds the frame to a freshly acquired swapchain image.
void Reset(u32 imageIdx, vk::Image swapchainImage, vk::ImageView swapchainImageView, Size2D swapchainSize);
GraphicsContext CreateGraphicsContext();
TransferContext CreateAsyncTransferContext();
ComputeContext CreateAsyncComputeContext();
// Blocks until the GPU has finished the previous use of this frame slot.
void WaitUntilReady();
Frame() = default;
Frame(RenderingDevice &device, u32 frameIndex, u32 primaryQueueFamily, u32 asyncTransferQueue,
u32 asyncComputeQueue);
Frame(Frame &&other) noexcept;
Frame &operator=(Frame &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(Frame);
~Frame() = default;
};
class CommitManager;
// Central rendering facade: owns the Vulkan instance/device/swapchain,
// creates ref-counted resources (buffers, images, views, samplers),
// compiles Slang shaders into pipelines, and drives frames-in-flight.
class RenderingDevice final
{
public: // TODO: Temp
std::reference_wrapper<Window> m_Window;
Instance m_Instance;
Surface m_Surface;
Device m_Device;
Swapchain m_Swapchain;
std::unique_ptr<CommitManager> m_CommitManager;
// TODO: This is single-threaded.
vk::Queue m_PrimaryQueue;
u32 m_PrimaryQueueFamily;
vk::Queue m_TransferQueue;
u32 m_TransferQueueFamily;
vk::Queue m_ComputeQueue;
u32 m_ComputeQueueFamily;
_internal::OrderlessTransferContextPool m_TransferContextPool;
_internal::OrderlessComputeContextPool m_ComputeContextPool;
std::array<Frame, MAX_FRAMES_IN_FLIGHT> m_Frames;
u32 m_CurrentFrameIdx = 0;
public:
// ====================================================================================================
// Resource Management
// ====================================================================================================
//
// Buffer Management
// ----------------------------------------------------------------------------------------------------
[[nodiscard]] Ref<StorageBuffer> CreateStorageBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Ref<IndexBuffer> CreateIndexBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Ref<UniformBuffer> CreateUniformBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Ref<StagingBuffer> CreateStagingBuffer(usize size, cstr name = nullptr);
[[nodiscard]] Ref<VertexBuffer> CreateVertexBuffer(usize size, cstr name = nullptr);
//
// Image Management
// ----------------------------------------------------------------------------------------------------
// Typed convenience overloads: create as a base image, then cast to T.
template <concepts::ImageInto<Texture> T>
[[nodiscard]] Ref<T>
CreateTexture2D(Texture2DCreateInfo const &createInfo)
{
return CastImage<T>(CreateTexture2D(createInfo));
}
template <concepts::ImageInto<TextureCube> T>
[[nodiscard]] Ref<T>
CreateTextureCube(TextureCubeCreateInfo const &createInfo)
{
return CastImage<T>(CreateTextureCube(createInfo));
}
[[nodiscard]] Ref<Image> CreateTexture2D(Texture2DCreateInfo const &createInfo);
[[nodiscard]] Ref<ImageCube> CreateTextureCube(TextureCubeCreateInfo const &createInfo);
[[nodiscard]] Ref<Image> CreateAttachment(AttachmentCreateInfo const &createInfo);
[[nodiscard]] Ref<Image> CreateDepthStencilImage(DepthStencilImageCreateInfo const &createInfo);
//
// View Management
// ----------------------------------------------------------------------------------------------------
// Typed view creation: type-erase the create info, then cast the result.
template <concepts::View TImageView>
Ref<TImageView>
CreateView(ViewCreateInfo<typename TImageView::ImageType> const &createInfo)
{
return CastView<TImageView>(CreateView(ViewCreateInfo<Image>(createInfo)));
}
[[nodiscard]] Ref<ImageView> CreateView(ViewCreateInfo<Image> const &createInfo);
//
// Image - View Combined Management
// ----------------------------------------------------------------------------------------------------
template <concepts::ViewTo<Image> T>
[[nodiscard]] Ref<T>
CreateTexture2DWithView(Texture2DCreateInfo const &createInfo)
{
auto handle = CreateTexture2DWithView(createInfo);
return CastView<T>(handle);
}
template <concepts::ViewTo<ImageCube> T>
[[nodiscard]] Ref<T>
CreateTextureCubeWithView(TextureCubeCreateInfo const &createInfo)
{
auto handle = CreateTextureCubeWithView(createInfo);
return CastView<T>(handle);
}
[[nodiscard]] Ref<TextureView> CreateTexture2DWithView(Texture2DCreateInfo const &createInfo);
[[nodiscard]] Ref<ImageCubeView> CreateTextureCubeWithView(TextureCubeCreateInfo const &createInfo);
[[nodiscard]] Ref<ImageView> CreateAttachmentWithView(AttachmentCreateInfo const &createInfo);
[[nodiscard]] Ref<ImageView> CreateDepthStencilImageWithView(DepthStencilImageCreateInfo const &createInfo);
//
// Sampler Management
// ----------------------------------------------------------------------------------------------------
private:
// Dedup cache: identical (hashed) create infos share one sampler.
eastl::hash_map<vk::SamplerCreateInfo, WeakRef<Sampler>> m_HashToSamplerIdx;
public:
Ref<Sampler> CreateSampler(SamplerCreateInfo const &createInfo);
//
// Pipeline
// ----------------------------------------------------------------------------------------------------
// TODO: Cache shader modules for reuse. Time to move to `slang`
private:
Slang::ComPtr<slang::IGlobalSession> m_GlobalSlangSession;
Slang::ComPtr<slang::ISession> m_SlangSession;
PipelineCreationError
CreateShaders(eastl::fixed_vector<vk::PipelineShaderStageCreateInfo, ShaderTypeCount, false> &shadersOut,
Slang::ComPtr<slang::IComponentType> &program, std::span<ShaderInfo const> const &shaders);
PipelineCreationError
CreatePipelineLayout(vk::PipelineLayout &pipelineLayout, Slang::ComPtr<slang::IComponentType> const &program);
public:
// Pipelines, unlike the other resources, are not ref-counted.
PipelineCreationError CreateGraphicsPipeline(Pipeline &pipeline, GraphicsPipelineCreateInfo const &createInfo);
PipelineCreationError CreateComputePipeline(Pipeline &pipeline, ComputePipelineCreateInfo const &createInfo);
//
// Frames
// ----------------------------------------------------------------------------------------------------
public:
Frame &GetNextFrame();
Size2D
GetSwapchainSize() const
{
return {m_Swapchain.m_Extent.width, m_Swapchain.m_Extent.height};
}
void
RegisterResizeCallback(Swapchain::FnResizeCallback &&callback)
{
m_Swapchain.RegisterResizeCallback(eastl::forward<Swapchain::FnResizeCallback>(callback));
}
void Present(Frame &frame, GraphicsContext &graphicsContext);
//
// Context
// ----------------------------------------------------------------------------------------------------
friend Context;
friend GraphicsContext;
friend TransferContext;
TransferContext CreateTransferContext();
ComputeContext CreateComputeContext();
Receipt Submit(Context &context);
//
// Sync
// ----------------------------------------------------------------------------------------------------
std::unique_ptr<_internal::SyncServer> m_SyncServer;
void WaitOn(Receipt recpt);
//
// RenderingDevice Methods
// ----------------------------------------------------------------------------------------------------
// Attaches a debug name to any Vulkan handle (no-op without debug utils).
template <concepts::VkHandle T>
void
SetName(T const &object, cstr name) const
{
m_Device.SetName(object, name);
}
[[nodiscard]] vk::Queue
GetQueue(u32 const familyIndex, u32 const queueIndex) const
{
return m_Device.GetQueue(familyIndex, queueIndex);
}
[[nodiscard]] eastl::vector<u8>
DumpPipelineCache() const
{
return m_Device.DumpPipelineCache();
}
void
WaitIdle() const
{
m_Device.WaitIdle();
}
// Inner
// ----------------------------------------------------------------------------------------------------
[[nodiscard]] Device &
GetInner()
{
return m_Device;
}
[[nodiscard]] vk::Device &
GetHandle()
{
return m_Device.m_Device;
}
// Ctor/Dtor
// ----------------------------------------------------------------------------------------------------
explicit RenderingDevice(DeviceCreateInfo const &createInfo);
~RenderingDevice();
PIN_MEMORY(RenderingDevice);
};
} // namespace systems

View File

@ -1,145 +0,0 @@
// =============================================
// Aster: resource.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "aster/core/buffer.h"
#include "aster/core/image.h"
#include "aster/core/image_view.h"
#include <EASTL/intrusive_ptr.h>
namespace systems
{
// ====================================================================================================
#pragma region Util Methods
// ====================================================================================================
#pragma region Buffer
// ----------------------------------------------------------------------------------------------------
// Reinterpret a buffer Ref as a different buffer type. Statically allowed
// conversions pass through; otherwise the runtime usage flags are asserted.
template <std::derived_from<Buffer> TTo, std::derived_from<Buffer> TFrom>
static Ref<TTo>
CastBuffer(Ref<TFrom> const &from)
{
if constexpr (not concepts::BufferInto<TFrom, TTo>)
assert(TTo::FLAGS & from->m_Flags);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion
#pragma region Image
// ----------------------------------------------------------------------------------------------------
// Reinterpret an image Ref as a different image type, with the same
// static-or-asserted flag check as CastBuffer.
template <std::derived_from<Image> TTo, std::derived_from<Image> TFrom>
static Ref<TTo>
CastImage(Ref<TFrom> const &from)
{
if constexpr (not concepts::ImageInto<TFrom, TTo>)
assert(TTo::FLAGS & from->m_Flags_);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion
#pragma region View
// ----------------------------------------------------------------------------------------------------
// Reinterpret a view Ref as a view over a different image type; checks the
// underlying image's flags when the conversion is not statically allowed.
template <concepts::View TTo, std::derived_from<Image> TFrom>
static Ref<TTo>
CastView(Ref<View<TFrom>> const &from)
{
if constexpr (not concepts::ImageInto<TFrom, typename TTo::ImageType>)
assert(TTo::ImageType::FLAGS & from->m_Image->m_Flags_);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion
#pragma endregion
/**
 * ResId manages the lifetime of the committed resource.
 * Copies add a reference in the CommitManager; destruction releases one.
 * Release/AddRef on INVALID must be tolerated by the CommitManager — that is
 * already the Null() construction/destruction path.
 * @tparam T Type of the committed resource.
 */
template <typename T>
class ResId
{
    using IdType = u32;

public:
    constexpr static IdType INVALID = MaxValue<IdType>;

private:
    IdType m_Index;
    u32 m_Padding = 0; //< Slang DescriptorHandle are a pair of u32. TODO: Use as validation.

    explicit ResId(IdType const index)
        : m_Index{index}
    {
        AddRef();
    }

    friend class CommitManager;

public:
    static ResId
    Null()
    {
        return ResId{INVALID};
    }
    ResId(ResId const &other)
        : m_Index{other.m_Index}
    {
        AddRef();
    }
    ResId(ResId &&other) noexcept
        : m_Index{other.m_Index}
    {
        // FIX: a move transfers ownership instead of adding a reference;
        // the moved-from id is left INVALID (its dtor's Release is a no-op
        // by the Null() contract above).
        other.m_Index = INVALID;
    }
    ResId &
    operator=(ResId const &other)
    {
        if (this == &other)
            return *this;
        // FIX: release the id currently held, otherwise its refcount leaks.
        Release();
        m_Index = other.m_Index;
        AddRef();
        return *this;
    }
    ResId &
    operator=(ResId &&other) noexcept
    {
        if (this == &other)
            return *this;
        // FIX: drop our reference and steal other's without refcount churn.
        Release();
        m_Index = other.m_Index;
        other.m_Index = INVALID;
        return *this;
    }
    ~ResId()
    {
        Release();
    }

private:
    void AddRef() const;  ///< Increases the refcount in the CommitManager.
    void Release() const; ///< Decreases the refcount in the CommitManager.
};
// Tag type that implicitly converts to ResId<T>::Null() for any T, so call
// sites can pass `NullId{}` without spelling out the resource type.
struct NullId
{
template <typename T>
operator ResId<T>()
{
return ResId<T>::Null();
}
};
} // namespace systems

View File

@ -1,79 +0,0 @@
// =============================================
// Aster: sync_server.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/aster.h"
#include "context.h"
#include <EASTL/deque.h>
#include <EASTL/intrusive_list.h>
namespace systems
{
class Receipt;
class RenderingDevice;
} // namespace systems
namespace systems::_internal
{
// A point on a timeline semaphore: the value to wait for and the value the
// next signal will use.
struct TimelinePoint
{
u64 m_WaitValue;
u64 m_NextValue;
};
// Hands out timeline-semaphore-backed Receipts for submissions and services
// waits on them; entries are pooled and recycled through a free list.
class SyncServer
{
struct Entry : eastl::intrusive_list_node
{
vk::Semaphore m_Semaphore;
TimelinePoint m_CurrentPoint;
ContextPool *m_AttachedPool; // Pool to recycle once the wait completes.
explicit Entry(RenderingDevice &device);
void Destroy(RenderingDevice &device);
void Wait(RenderingDevice &device);
void Next();
void AttachPool(ContextPool *pool);
Entry(Entry &&) = default;
Entry &operator=(Entry &&) = default;
~Entry() = default;
DISALLOW_COPY_AND_ASSIGN(Entry);
};
RenderingDevice *m_Device;
// Stable storage for entries (deque => no relocation) + recycled entries.
eastl::deque<Entry> m_Allocations;
eastl::intrusive_list<Entry> m_FreeList;
public:
Receipt Allocate();
void Free(Receipt);
void WaitOn(Receipt);
private:
// Recovers the Entry hidden behind a Receipt's opaque pointer.
static Entry &GetEntry(Receipt receipt);
// Inner Alloc/Free functions.
Entry &AllocateEntry();
void FreeEntry(Entry &entry);
// Constructor/Destructor
explicit SyncServer(RenderingDevice &device);
public:
~SyncServer();
// Move Constructors.
SyncServer(SyncServer &&other) noexcept;
SyncServer &operator=(SyncServer &&other) noexcept;
friend RenderingDevice;
DISALLOW_COPY_AND_ASSIGN(SyncServer);
};
} // namespace systems::_internal

View File

@ -3,7 +3,4 @@
cmake_minimum_required(VERSION 3.13)
target_sources(aster_core
INTERFACE
"logger.h"
"freelist.h"
"files.h")
INTERFACE "logger.h")

View File

@ -1,15 +0,0 @@
// =============================================
// Aster: files.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/core/constants.h"
#include <EASTL/span.h>
#include <EASTL/vector.h>
// Reads a whole file as 32-bit words (e.g. SPIR-V binaries).
eastl::vector<u32> ReadFile(std::string_view fileName);
// Reads a whole file as raw bytes; logging/failure behavior on a missing
// file is controlled by errorOnFail.
eastl::vector<u8> ReadFileBytes(std::string_view fileName, bool errorOnFail = true);
// Writes the byte span to fileName; returns true on success.
bool WriteFileBytes(std::string_view fileName, eastl::span<u8> data);

View File

@ -1,96 +0,0 @@
// =============================================
// Aster: freelist.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include <optional>
// Link node overlaid onto freed objects' storage by FreeList.
struct FreeListNode
{
FreeListNode *m_Next;
};
// T can host a FreeListNode inside its own bytes when freed.
template <typename T>
concept FreeListCapable = sizeof(T) >= sizeof(FreeListNode);
/**
 * Intrusive LIFO free list: the "next" pointer is stored inside the freed
 * object's own bytes (hence the FreeListCapable size requirement).
 * The list never owns memory — it only threads through storage the caller
 * has already allocated.
 */
template <FreeListCapable T>
struct FreeList
{
    using Value = T;
    using Reference = T &;
    using ConstReference = T const &;
    using Pointer = T *;

    FreeListNode *m_Top; // Most recently pushed node; nullptr when empty.

    FreeList()
        : m_Top{nullptr}
    {
    }
    FreeList(FreeList &&other) noexcept
        : m_Top{Take(other.m_Top)}
    {
    }
    FreeList &
    operator=(FreeList &&other) noexcept
    {
        if (this == &other)
            return *this;
        m_Top = Take(other.m_Top);
        return *this;
    }
    DISALLOW_COPY_AND_ASSIGN(FreeList);
    ~FreeList()
    {
        // Not owning: just forget the chain.
        m_Top = nullptr;
    }
    [[nodiscard]] bool
    Empty() const
    {
        return !m_Top;
    }
    /// Removes and returns the most recently pushed element.
    [[nodiscard]] Reference
    Pop()
    {
        assert(m_Top);
        Reference ref = *reinterpret_cast<Pointer>(m_Top);
        m_Top = m_Top->m_Next;
        return ref;
    }
    /// Pushes a free element; its storage is reused for the link node.
    void
    Push(Reference ref)
    {
        auto next = reinterpret_cast<FreeListNode *>(&ref);
        next->m_Next = m_Top;
        m_Top = next;
    }
    [[nodiscard]] ConstReference
    Peek() const
    {
        assert(m_Top);
        // FIX: was `return *m_Top;`, which yields the FreeListNode overlay
        // (ill-formed/wrong for an unrelated T). Reinterpret back to T,
        // exactly as Pop() does.
        return *reinterpret_cast<Value const *>(m_Top);
    }
    [[nodiscard]] Reference
    Peek()
    {
        assert(m_Top);
        // FIX: see const overload.
        return *reinterpret_cast<Pointer>(m_Top);
    }
    void
    Clear()
    {
        m_Top = nullptr;
    }
};

View File

@ -20,16 +20,16 @@ struct Logger
eVerbose,
};
u32 m_MinimumLoggingLevel{static_cast<u32>(LogType::eDebug)};
u32 m_MinimumLoggingLevel{Cast<u32>(LogType::eDebug)};
void
SetMinimumLoggingLevel(LogType logType)
{
m_MinimumLoggingLevel = static_cast<u32>(logType);
m_MinimumLoggingLevel = Cast<u32>(logType);
}
template <LogType TLogLevel>
constexpr static char const *
constexpr static const char *
ToCstr()
{
if constexpr (TLogLevel == LogType::eError)
@ -45,7 +45,7 @@ struct Logger
}
template <LogType TLogLevel>
constexpr static char const *
constexpr static const char *
ToColorCstr()
{
if constexpr (TLogLevel == LogType::eError)
@ -62,9 +62,9 @@ struct Logger
template <LogType TLogLevel>
void
Log(std::string_view const &message, char const *loc, u32 line) const
Log(const std::string_view &message, const char *loc, u32 line) const
{
if (static_cast<u32>(TLogLevel) <= m_MinimumLoggingLevel)
if (Cast<u32>(TLogLevel) <= m_MinimumLoggingLevel)
{
fmt::println("{}{} {} {} at {}:{}{}", ToColorCstr<TLogLevel>(), ToCstr<TLogLevel>(), message.data(),
ansi_color::Black, loc, line, ansi_color::Reset);
@ -79,9 +79,9 @@ struct Logger
template <LogType TLogLevel>
void
LogCond(char const *exprStr, std::string_view const &message, char const *loc, u32 line) const
LogCond(const char *exprStr, const std::string_view &message, const char *loc, u32 line) const
{
if (static_cast<u32>(TLogLevel) <= m_MinimumLoggingLevel)
if (Cast<u32>(TLogLevel) <= m_MinimumLoggingLevel)
{
fmt::println("{}{} ({}) {} {} at {}:{}{}", ToColorCstr<TLogLevel>(), ToCstr<TLogLevel>(), exprStr,
message.data(), ansi_color::Black, loc, line, ansi_color::Reset);
@ -103,26 +103,26 @@ extern Logger g_Logger;
#define INFO(...) g_Logger.Log<Logger::LogType::eInfo>(fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ERROR_IF(expr, ...) \
if (static_cast<bool>(expr)) [[unlikely]] \
if (Cast<bool>(expr)) [[unlikely]] \
g_Logger.LogCond<Logger::LogType::eError>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define WARN_IF(expr, ...) \
if (static_cast<bool>(expr)) [[unlikely]] \
if (Cast<bool>(expr)) [[unlikely]] \
g_Logger.LogCond<Logger::LogType::eWarning>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define INFO_IF(expr, ...) \
if (static_cast<bool>(expr)) \
if (Cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eInfo>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_ERROR(expr, ...) \
; \
else if (static_cast<bool>(expr)) \
else if (Cast<bool>(expr)) \
[[unlikely]] g_Logger.LogCond<Logger::LogType::eError>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_WARN(expr, ...) \
; \
else if (static_cast<bool>(expr)) \
else if (Cast<bool>(expr)) \
[[unlikely]] g_Logger.LogCond<Logger::LogType::eWarning>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_INFO(expr, ...) \
; \
else if (static_cast<bool>(expr)) \
else if (Cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eInfo>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_ERROR(...) \
@ -139,11 +139,11 @@ extern Logger g_Logger;
#define DEBUG(...) g_Logger.Log<Logger::LogType::eDebug>(fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define DEBUG_IF(expr, ...) \
if (static_cast<bool>(expr)) \
if (Cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eDebug>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_DEBUG(expr, ...) \
; \
else if (static_cast<bool>(expr)) \
else if (Cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eDebug>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_DEBUG(...) \
; \
@ -174,11 +174,11 @@ extern Logger g_Logger;
#define VERBOSE(...) g_Logger.Log<Logger::LogType::eVerbose>(fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define VERBOSE_IF(expr, ...) \
if (static_cast<bool>(expr)) \
if (Cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eVerbose>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_IF_VERBOSE(expr, ...) \
; \
else if (static_cast<bool>(expr)) \
else if (Cast<bool>(expr)) \
g_Logger.LogCond<Logger::LogType::eVerbose>(#expr, fmt::format(__VA_ARGS__), __FILE__, __LINE__)
#define ELSE_VERBOSE(...) \
; \
@ -207,5 +207,5 @@ extern Logger g_Logger;
#endif // !defined(VERBOSE_LOG_DISABLED)
#define DO(code) , code
#define ABORT(code) exit(static_cast<i32>(code))
#define ABORT(code) exit(Cast<i32>(code))
#define THEN_ABORT(code) , ABORT(code)

View File

@ -5,7 +5,7 @@ cmake_minimum_required(VERSION 3.13)
target_sources(aster_core
PRIVATE
"global.cpp"
"instance.cpp"
"context.cpp"
"physical_device.cpp"
"device.cpp"
"swapchain.cpp"
@ -13,5 +13,4 @@ PRIVATE
"buffer.cpp"
"image.cpp"
"surface.cpp"
"window.cpp"
"sampler.cpp")
"window.cpp")

View File

@ -1,25 +1,35 @@
// =============================================
// Aster: buffer.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/buffer.h"
#include "core/device.h"
Buffer::Buffer(Device const *device, usize const size, vk::BufferUsageFlags const bufferUsage,
VmaAllocationCreateFlags const allocationFlags, VmaMemoryUsage const memoryUsage, cstr const name)
void
Buffer::Destroy(const Device *device)
{
assert(!m_Buffer);
if (!IsValid() || !IsOwned())
return;
m_Device = device;
vmaDestroyBuffer(device->m_Allocator, m_Buffer, m_Allocation);
m_Size_ = 0;
}
void
Buffer::Allocate(const Device *device, usize size, vk::BufferUsageFlags bufferUsage,
VmaAllocationCreateFlags allocationFlags, VmaMemoryUsage memoryUsage, cstr name)
{
assert(!IsValid());
assert(size <= SIZE_MASK);
vk::BufferCreateInfo bufferCreateInfo = {
.size = size,
.usage = bufferUsage | vk::BufferUsageFlagBits::eShaderDeviceAddress,
.usage = bufferUsage,
.sharingMode = vk::SharingMode::eExclusive,
};
VmaAllocationCreateInfo const allocationCreateInfo = {
const VmaAllocationCreateInfo allocationCreateInfo = {
.flags = allocationFlags,
.usage = memoryUsage,
};
@ -27,88 +37,156 @@ Buffer::Buffer(Device const *device, usize const size, vk::BufferUsageFlags cons
VkBuffer buffer;
VmaAllocation allocation;
VmaAllocationInfo allocationInfo;
auto result = static_cast<vk::Result>(
vmaCreateBuffer(device->m_Allocator, reinterpret_cast<VkBufferCreateInfo *>(&bufferCreateInfo),
auto result = Cast<vk::Result>(vmaCreateBuffer(device->m_Allocator, Recast<VkBufferCreateInfo *>(&bufferCreateInfo),
&allocationCreateInfo, &buffer, &allocation, &allocationInfo));
ERROR_IF(Failed(result), "Could not allocate buffer. Cause: {}", result) THEN_ABORT(result);
// vk::MemoryPropertyFlags memoryPropertyFlags;
// vmaGetAllocationMemoryProperties(device->m_Allocator, allocation, Recast<VkMemoryPropertyFlags
// *>(&memoryPropertyFlags));
vk::MemoryPropertyFlags memoryPropertyFlags;
vmaGetAllocationMemoryProperties(device->m_Allocator, allocation,
Recast<VkMemoryPropertyFlags *>(&memoryPropertyFlags));
// TODO: Actually track Host Access
// bool hostAccessible = static_cast<bool>(memoryPropertyFlags & vk::MemoryPropertyFlagBits::eHostVisible);
// bool hostAccessible = Cast<bool>(memoryPropertyFlags & vk::MemoryPropertyFlagBits::eHostVisible);
m_Buffer = buffer;
m_Size = size;
m_Size_ = size | VALID_BUFFER_BIT | OWNED_BIT;
m_Allocation = allocation;
m_Mapped = static_cast<u8 *>(allocationInfo.pMappedData);
m_Flags = {};
if (bufferUsage & vk::BufferUsageFlagBits::eTransferSrc)
m_Flags |= FlagBits::eStaging;
if (bufferUsage & vk::BufferUsageFlagBits::eIndexBuffer)
m_Flags |= FlagBits::eIndex;
if (bufferUsage & vk::BufferUsageFlagBits::eIndirectBuffer)
m_Flags |= FlagBits::eIndirect;
if (bufferUsage & vk::BufferUsageFlagBits::eVertexBuffer)
m_Flags |= FlagBits::eVertex;
if (bufferUsage & vk::BufferUsageFlagBits::eUniformBuffer)
m_Flags |= FlagBits::eUniform;
if (bufferUsage & vk::BufferUsageFlagBits::eStorageBuffer)
m_Flags |= FlagBits::eStorage;
vk::BufferDeviceAddressInfo const addressInfo = {.buffer = m_Buffer};
m_DeviceAddr = m_Device->m_Device.getBufferAddress(&addressInfo);
m_Mapped = Cast<u8 *>(allocationInfo.pMappedData);
device->SetName(m_Buffer, name);
}
Buffer::Buffer(Buffer &&other) noexcept
: m_Device{Take(other.m_Device)}
, m_Buffer{Take(other.m_Buffer)}
, m_Allocation{Take(other.m_Allocation)}
, m_Mapped{Take(other.m_Mapped)}
, m_DeviceAddr{Take(other.m_DeviceAddr)}
, m_Size{Take(other.m_Size)}
{
}
Buffer &
Buffer::operator=(Buffer &&other) noexcept
{
if (this == &other)
return *this;
using std::swap;
swap(m_Device, other.m_Device);
swap(m_Buffer, other.m_Buffer);
swap(m_Allocation, other.m_Allocation);
swap(m_Mapped, other.m_Mapped);
swap(m_DeviceAddr, other.m_DeviceAddr);
swap(m_Size, other.m_Size);
return *this;
}
Buffer::~Buffer()
{
if (!m_Buffer)
return;
vmaDestroyBuffer(m_Device->m_Allocator, Take(m_Buffer), m_Allocation);
m_Size = 0;
}
uptr
Buffer::GetDeviceAddress() const
Buffer::GetDeviceAddress(const Device *device)
{
return m_DeviceAddr;
vk::BufferDeviceAddressInfo addressInfo = {.buffer = m_Buffer};
return device->m_Device.getBufferAddress(&addressInfo);
}
void
Buffer::Write(usize const offset, usize const size, void const *data) const
Buffer::Write(const Device *device, usize offset, usize size, const void *data)
{
assert(IsMapped());
assert(IsHostVisible());
if (!IsMapped())
{
void *mapped;
auto result = Cast<vk::Result>(vmaMapMemory(device->m_Allocator, m_Allocation, &mapped));
ERROR_IF(Failed(result), "Memory mapping failed. Cause: {}", result);
if (!Failed(result))
{
m_Mapped = Cast<u8 *>(mapped);
memcpy(m_Mapped + offset, data, size);
vmaUnmapMemory(device->m_Allocator, m_Allocation);
m_Mapped = nullptr;
}
}
else
{
memcpy(m_Mapped + offset, data, size);
}
// TODO: Debug this.
// auto result = static_cast<vk::Result>(vmaCopyMemoryToAllocation(device->m_Allocator, &data, m_Allocation, 0,
// size)); ERROR_IF(Failed(result), "Writing to buffer failed. Cause: {}", result) THEN_ABORT(result);
// auto result = Cast<vk::Result>(vmaCopyMemoryToAllocation(device->m_Allocator, &data, m_Allocation, 0, size));
// ERROR_IF(Failed(result), "Writing to buffer failed. Cause: {}", result) THEN_ABORT(result);
}
void
UniformBuffer::Init(const Device *device, const usize size, const cstr name)
{
Allocate(device, size, vk::BufferUsageFlagBits::eUniformBuffer,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
void
StorageBuffer::Init(const Device *device, usize size, bool hostVisible, cstr name)
{
Init(device, size, hostVisible, false, name);
}
void
StorageBuffer::Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name)
{
vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer;
if (deviceAddress)
{
usage |= vk::BufferUsageFlagBits::eShaderDeviceAddress;
}
if (hostVisible)
{
Allocate(device, size, usage,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
else
{
usage |= vk::BufferUsageFlagBits::eTransferDst;
Allocate(device, size, usage, 0,
VMA_MEMORY_USAGE_AUTO, name);
}
}
void
StorageIndexBuffer::Init(const Device *device, usize size, bool hostVisible, bool deviceAddress, cstr name)
{
vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer;
if (deviceAddress)
{
usage |= vk::BufferUsageFlagBits::eShaderDeviceAddress;
}
if (hostVisible)
{
Allocate(device, size, usage,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
else
{
usage |= vk::BufferUsageFlagBits::eTransferDst;
Allocate(device, size, usage, 0, VMA_MEMORY_USAGE_AUTO, name);
}
}
void
IndirectBuffer::Init(const Device *device, usize size, bool hostVisible, cstr name)
{
vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndirectBuffer | vk::BufferUsageFlagBits::eShaderDeviceAddress;
if (hostVisible)
{
Allocate(device, size, usage,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}
else
{
usage |= vk::BufferUsageFlagBits::eTransferDst;
Allocate(device, size, usage, 0, VMA_MEMORY_USAGE_AUTO, name);
}
}
void
VertexBuffer::Init(const Device *device, usize size, cstr name)
{
Allocate(device, size, vk::BufferUsageFlagBits::eVertexBuffer | vk::BufferUsageFlagBits::eTransferDst,
0, VMA_MEMORY_USAGE_AUTO, name);
}
void
IndexBuffer::Init(const Device *device, usize size, cstr name)
{
Allocate(device, size, vk::BufferUsageFlagBits::eIndexBuffer | vk::BufferUsageFlagBits::eTransferDst,
0, VMA_MEMORY_USAGE_AUTO, name);
}
void
StagingBuffer::Init(const Device *device, usize size, cstr name)
{
Allocate(device, size, vk::BufferUsageFlagBits::eTransferSrc,
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
VMA_MEMORY_USAGE_AUTO, name);
}

View File

@ -1,46 +1,48 @@
// =============================================
// Aster: context.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/instance.h"
#include "core/window.h"
#include "core/context.h"
#include <EASTL/array.h>
#include <EASTL/fixed_vector.h>
VKAPI_ATTR b32 VKAPI_CALL
DebugCallback(vk::DebugUtilsMessageSeverityFlagBitsEXT const messageSeverity,
vk::DebugUtilsMessageTypeFlagsEXT const messageType,
vk::DebugUtilsMessengerCallbackDataEXT const *callbackData, [[maybe_unused]] void *userData)
DebugCallback(const VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
const VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT *callbackData, [[maybe_unused]] void *userData)
{
using Severity = vk::DebugUtilsMessageSeverityFlagsEXT;
using SeverityBits = vk::DebugUtilsMessageSeverityFlagBitsEXT;
using MessageType = vk::DebugUtilsMessageTypeFlagsEXT;
using MessageTypeBits = vk::DebugUtilsMessageTypeFlagBitsEXT;
if (messageType & MessageTypeBits::eValidation)
const auto severity = Severity(messageSeverity);
if (MessageType(messageType) & MessageTypeBits::eValidation)
{
if (messageSeverity & SeverityBits::eError)
if (severity & SeverityBits::eError)
ERROR("{}", callbackData->pMessage);
if (messageSeverity & SeverityBits::eWarning)
if (severity & SeverityBits::eWarning)
WARN("{}", callbackData->pMessage);
if (messageSeverity & SeverityBits::eInfo)
if (severity & SeverityBits::eInfo)
INFO("{}", callbackData->pMessage);
if (messageSeverity & SeverityBits::eVerbose)
if (severity & SeverityBits::eVerbose)
VERBOSE("{}", callbackData->pMessage);
}
return false;
}
Instance::Instance(cstr const appName, Version const version, bool enableValidation)
Context::Context(const cstr appName, const Version version, bool enableValidation)
{
INFO_IF(enableValidation, "Validation Layers enabled");
// TODO Get/Check API Version
// Creating Instance
vk::ApplicationInfo const appInfo = {
const vk::ApplicationInfo appInfo = {
.pApplicationName = appName,
.applicationVersion = version.GetVkVersion(),
.pEngineName = PROJECT_NAME,
@ -48,7 +50,7 @@ Instance::Instance(cstr const appName, Version const version, bool enableValidat
.apiVersion = ASTER_API_VERSION,
};
vk::DebugUtilsMessengerCreateInfoEXT const debugUtilsMessengerCreateInfo = {
const vk::DebugUtilsMessengerCreateInfoEXT debugUtilsMessengerCreateInfo = {
.messageSeverity = vk::DebugUtilsMessageSeverityFlagBitsEXT::eError |
vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning |
vk::DebugUtilsMessageSeverityFlagBitsEXT::eInfo,
@ -59,23 +61,23 @@ Instance::Instance(cstr const appName, Version const version, bool enableValidat
.pUserData = nullptr,
};
u32 windowExtensionCount = 0;
cstr *windowExtensions = Window::GetInstanceExtensions(&windowExtensionCount);
eastl::fixed_vector<cstr, 3> instanceExtensions(windowExtensions, windowExtensions + windowExtensionCount);
u32 glfwExtensionCount = 0;
cstr *glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
eastl::fixed_vector<cstr, 3> instanceExtensions(glfwExtensions, glfwExtensions + glfwExtensionCount);
if (enableValidation)
{
instanceExtensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
}
vk::detail::DynamicLoader const dl;
const vk::DynamicLoader dl;
// ReSharper disable once CppInconsistentNaming
auto const vkGetInstanceProcAddr = dl.getProcAddress<PFN_vkGetInstanceProcAddr>("vkGetInstanceProcAddr");
const auto vkGetInstanceProcAddr = dl.getProcAddress<PFN_vkGetInstanceProcAddr>("vkGetInstanceProcAddr");
VULKAN_HPP_DEFAULT_DISPATCHER.init(vkGetInstanceProcAddr);
auto const instanceCreateInfo = vk::InstanceCreateInfo{
const auto instanceCreateInfo = vk::InstanceCreateInfo{
.pNext = enableValidation ? &debugUtilsMessengerCreateInfo : nullptr,
.pApplicationInfo = &appInfo,
.enabledExtensionCount = static_cast<u32>(instanceExtensions.size()),
.enabledExtensionCount = Cast<u32>(instanceExtensions.size()),
.ppEnabledExtensionNames = instanceExtensions.data(),
};
@ -95,11 +97,8 @@ Instance::Instance(cstr const appName, Version const version, bool enableValidat
}
}
Instance::~Instance()
Context::~Context()
{
if (!m_Instance)
return;
if (m_DebugMessenger)
{
m_Instance.destroy(m_DebugMessenger, nullptr);
@ -109,14 +108,14 @@ Instance::~Instance()
DEBUG("Instance destroyed");
}
Instance::Instance(Instance &&other) noexcept
Context::Context(Context &&other) noexcept
: m_Instance(Take(other.m_Instance))
, m_DebugMessenger(Take(other.m_DebugMessenger))
{
}
Instance &
Instance::operator=(Instance &&other) noexcept
Context &
Context::operator=(Context &&other) noexcept
{
if (this == &other)
return *this;

View File

@ -1,11 +1,11 @@
// =============================================
// Aster: device.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/device.h"
#include "core/instance.h"
#include "core/context.h"
#include "core/physical_device.h"
#include "core/queue_allocation.h"
@ -17,12 +17,18 @@ constexpr eastl::array DEVICE_EXTENSIONS = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
};
Device::Device(Instance const &context, PhysicalDevice &physicalDevice, Features &enabledFeatures,
eastl::span<QueueAllocation> const &queueAllocations, eastl::span<u8> const &pipelineCacheData,
Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features *enabledFeatures,
const eastl::vector<QueueAllocation> &queueAllocations, NameString &&name)
: Device(context, physicalDevice, enabledFeatures, queueAllocations, {}, std::move(name))
{
}
Device::Device(const Context *context, PhysicalDevice *physicalDevice, Features *enabledFeatures,
const eastl::vector<QueueAllocation> &queueAllocations, eastl::span<u8> &&pipelineCacheData,
NameString &&name)
: m_Name(std::move(name))
, m_PhysicalDevice(physicalDevice.m_PhysicalDevice)
, m_ValidationEnabled(context.m_DebugMessenger != nullptr)
, m_PhysicalDevice(physicalDevice->m_PhysicalDevice)
, m_ValidationEnabled(context->m_DebugMessenger != nullptr)
{
// Shouldn't have more than 4 deviceQueueFamilies in use anyway. Else we can heap
eastl::fixed_vector<vk::DeviceQueueCreateInfo, 4> deviceQueueCreateInfos;
@ -45,19 +51,19 @@ Device::Device(Instance const &context, PhysicalDevice &physicalDevice, Features
});
}
vk::PhysicalDeviceFeatures *deviceFeatures = &enabledFeatures.m_Vulkan10Features;
vk::PhysicalDeviceVulkan11Features *vulkan11Features = &enabledFeatures.m_Vulkan11Features;
vk::PhysicalDeviceVulkan12Features *vulkan12Features = &enabledFeatures.m_Vulkan12Features;
vk::PhysicalDeviceVulkan13Features *vulkan13Features = &enabledFeatures.m_Vulkan13Features;
vk::PhysicalDeviceFeatures *deviceFeatures = &enabledFeatures->m_Vulkan10Features;
vk::PhysicalDeviceVulkan11Features *vulkan11Features = &enabledFeatures->m_Vulkan11Features;
vk::PhysicalDeviceVulkan12Features *vulkan12Features = &enabledFeatures->m_Vulkan12Features;
vk::PhysicalDeviceVulkan13Features *vulkan13Features = &enabledFeatures->m_Vulkan13Features;
vulkan11Features->pNext = vulkan12Features;
vulkan12Features->pNext = vulkan13Features;
vk::DeviceCreateInfo deviceCreateInfo = {
.pNext = vulkan11Features,
.queueCreateInfoCount = static_cast<u32>(deviceQueueCreateInfos.size()),
.queueCreateInfoCount = Cast<u32>(deviceQueueCreateInfos.size()),
.pQueueCreateInfos = deviceQueueCreateInfos.data(),
.enabledExtensionCount = static_cast<u32>(DEVICE_EXTENSIONS.size()),
.enabledExtensionCount = Cast<u32>(DEVICE_EXTENSIONS.size()),
.ppEnabledExtensionNames = DEVICE_EXTENSIONS.data(),
.pEnabledFeatures = deviceFeatures,
};
@ -65,25 +71,25 @@ Device::Device(Instance const &context, PhysicalDevice &physicalDevice, Features
vk::Result result = m_PhysicalDevice.createDevice(&deviceCreateInfo, nullptr, &m_Device);
ERROR_IF(Failed(result), "Could not initialize Vulkan Device. Cause: {}", result)
THEN_ABORT(result)
ELSE_DEBUG("{} ({}) Initialized.", m_Name, physicalDevice.m_DeviceProperties.deviceName.data());
ELSE_DEBUG("{} ({}) Initialized.", m_Name, physicalDevice->m_DeviceProperties.deviceName.data());
SetName(m_Device, m_Name.data());
VmaVulkanFunctions vmaVulkanFunctions = {
.vkGetInstanceProcAddr = vk::detail::defaultDispatchLoaderDynamic.vkGetInstanceProcAddr,
.vkGetDeviceProcAddr = vk::detail::defaultDispatchLoaderDynamic.vkGetDeviceProcAddr,
.vkGetInstanceProcAddr = vk::defaultDispatchLoaderDynamic.vkGetInstanceProcAddr,
.vkGetDeviceProcAddr = vk::defaultDispatchLoaderDynamic.vkGetDeviceProcAddr,
};
VmaAllocatorCreateInfo const allocatorCreateInfo = {
const VmaAllocatorCreateInfo allocatorCreateInfo = {
.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT,
.physicalDevice = m_PhysicalDevice,
.device = m_Device,
.pVulkanFunctions = &vmaVulkanFunctions,
.instance = context.m_Instance,
.instance = context->m_Instance,
.vulkanApiVersion = ASTER_API_VERSION,
};
result = static_cast<vk::Result>(vmaCreateAllocator(&allocatorCreateInfo, &m_Allocator));
result = Cast<vk::Result>(vmaCreateAllocator(&allocatorCreateInfo, &m_Allocator));
ERROR_IF(Failed(result), "Memory allocator creation failed. Cause: {}", result)
DO(m_Device.destroy(nullptr))
THEN_ABORT(result)
@ -104,9 +110,6 @@ Device::Device(Instance const &context, PhysicalDevice &physicalDevice, Features
Device::~Device()
{
if (!m_Device)
return;
m_Device.destroy(m_PipelineCache, nullptr);
if (m_Allocator)
{
@ -120,7 +123,7 @@ Device::~Device()
}
vk::Queue
Device::GetQueue(u32 const familyIndex, u32 const queueIndex) const
Device::GetQueue(const u32 familyIndex, const u32 queueIndex) const
{
vk::Queue queue;
m_Device.getQueue(familyIndex, queueIndex, &queue);
@ -153,7 +156,6 @@ Device::Device(Device &&other) noexcept
, m_PhysicalDevice(Take(other.m_PhysicalDevice))
, m_Device(Take(other.m_Device))
, m_Allocator(Take(other.m_Allocator))
, m_PipelineCache(Take(other.m_PipelineCache))
{
}
@ -166,6 +168,5 @@ Device::operator=(Device &&other) noexcept
m_PhysicalDevice = Take(other.m_PhysicalDevice);
m_Device = Take(other.m_Device);
m_Allocator = Take(other.m_Allocator);
m_PipelineCache = Take(other.m_PipelineCache);
return *this;
}

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: global.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/global.h"
@ -26,11 +26,11 @@ struct MemorySize
{
usize totalBytes = bytes + m_Bytes;
m_Bytes = totalBytes % 1024;
usize const totalKb = m_Kilobytes + totalBytes / 1024;
const usize totalKb = m_Kilobytes + totalBytes / 1024;
m_Kilobytes = totalKb % 1024;
usize const totalMb = m_Megabytes + totalKb / 1024;
const usize totalMb = m_Megabytes + totalKb / 1024;
m_Megabytes = totalMb % 1024;
m_Gigabytes += static_cast<u16>(totalMb / 1024);
m_Gigabytes += Cast<u16>(totalMb / 1024);
return *this;
}
@ -56,23 +56,23 @@ struct fmt::formatter<MemorySize>
// return format_to(ctx.out(), "({}, {})", foo.a, foo.b); // --== KEY LINE ==--
if (mem.m_Gigabytes > 0)
{
return fmt::format_to(ctx.out(), "{}.{} GB", mem.m_Gigabytes, static_cast<u16>(mem.m_Megabytes / 1024.0));
return v10::format_to(ctx.out(), "{}.{} GB", mem.m_Gigabytes, Cast<u16>(mem.m_Megabytes / 1024.0));
}
if (mem.m_Megabytes > 0)
{
return fmt::format_to(ctx.out(), "{}.{} MB", mem.m_Megabytes, static_cast<u16>(mem.m_Kilobytes / 1024.0));
return v10::format_to(ctx.out(), "{}.{} MB", mem.m_Megabytes, Cast<u16>(mem.m_Kilobytes / 1024.0));
}
if (mem.m_Kilobytes > 0)
{
return fmt::format_to(ctx.out(), "{}.{} KB", mem.m_Kilobytes, static_cast<u16>(mem.m_Bytes / 1024.0));
return v10::format_to(ctx.out(), "{}.{} KB", mem.m_Kilobytes, Cast<u16>(mem.m_Bytes / 1024.0));
}
return fmt::format_to(ctx.out(), "{} Bytes", mem.m_Bytes);
return v10::format_to(ctx.out(), "{} Bytes", mem.m_Bytes);
}
};
void *
operator new[](size_t size, char const * /*pName*/, int flags, unsigned /*debugFlags*/, char const * /*file*/,
operator new[](size_t size, const char * /*pName*/, int flags, unsigned /*debugFlags*/, const char * /*file*/,
int /*line*/)
{
g_TotalAlloc += size;
@ -82,8 +82,8 @@ operator new[](size_t size, char const * /*pName*/, int flags, unsigned /*debugF
}
void *
operator new[](size_t size, size_t /*alignment*/, size_t /*alignmentOffset*/, char const * /*pName*/, int flags,
unsigned /*debugFlags*/, char const * /*file*/, int /*line*/)
operator new[](size_t size, size_t /*alignment*/, size_t /*alignmentOffset*/, const char * /*pName*/, int flags,
unsigned /*debugFlags*/, const char * /*file*/, int /*line*/)
{
g_TotalAlloc += size;

View File

@ -1,482 +1,425 @@
// =============================================
// Aster: image.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/image.h"
#include "core/device.h"
Image &
Image::operator=(Image &&other) noexcept
void
Image::Destroy(const Device *device)
{
if (this == &other)
return *this;
using std::swap;
swap(m_Device, other.m_Device);
swap(m_Image, other.m_Image);
swap(m_Allocation, other.m_Allocation);
swap(m_Extent, other.m_Extent);
swap(m_Format, other.m_Format);
swap(m_EmptyPadding_, other.m_EmptyPadding_);
swap(m_Flags_, other.m_Flags_);
swap(m_LayerCount, other.m_LayerCount);
swap(m_MipLevels, other.m_MipLevels);
return *this;
}
Image::~Image()
{
if (!IsValid())
if (!IsValid() || !IsOwned())
{
m_Flags_ = 0;
return;
}
vmaDestroyImage(m_Device->m_Allocator, Take(m_Image), m_Allocation);
m_Flags_ = {};
device->m_Device.destroy(m_View, nullptr);
vmaDestroyImage(device->m_Allocator, m_Image, m_Allocation);
m_Flags_ = 0;
}
void
Image::DestroyView(vk::ImageView const imageView) const
Texture::Init(const Device *device, const vk::Extent2D extent, vk::Format imageFormat, const bool isMipMapped,
const cstr name)
{
m_Device->m_Device.destroy(imageView, nullptr);
WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.width), "Image {2} is {0}x{1} (Non Power of Two)",
extent.width, extent.height, name ? name : "<unnamed>");
const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(eastl::max(extent.width, extent.height)))) : 1;
auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
if (isMipMapped)
{
usage |= vk::ImageUsageFlagBits::eTransferSrc;
}
vk::ImageCreateInfo imageCreateInfo = {
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = ToExtent3D(extent, 1),
.mipLevels = mipLevels,
.arrayLayers = 1,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = usage,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
vk::ImageView view;
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 1;
m_MipLevels = mipLevels;
device->SetName(m_Image, name);
}
//
// void
// Texture::Init(const Device *device, const vk::Extent2D extent, vk::Format imageFormat, const bool isMipMapped,
// const cstr name)
//{
// WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.width), "Image {2} is {0}x{1} (Non Power of Two)",
// extent.width, extent.height, name ? name : "<unnamed>");
//
// const u8 mipLevels = isMipMapped ? 1 + static_cast<u8>(floor(log2(eastl::max(extent.width, extent.height)))) : 1;
//
// auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
// if (isMipMapped)
// {
// usage |= vk::ImageUsageFlagBits::eTransferSrc;
// }
//
// vk::ImageCreateInfo imageCreateInfo = {
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = ToExtent3D(extent, 1),
// .mipLevels = mipLevels,
// .arrayLayers = 1,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = usage,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = {},
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
//
// vk::ImageView view;
// vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::e2D,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = mipLevels,
// .baseArrayLayer = 0,
// .layerCount = 1,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_LayerCount = 1;
// m_MipLevels = mipLevels;
//
// device->SetName(m_Image, name);
//}
//
///*
// Cube map Faces info.
//
// TODO: Correct this based on the actual layout for upside down viewport.
//
//| Axis | Layer | Up |
//|:----:|:-----:|:--:|
//| +x | 0 | -y |
//| -x | 1 | -y |
//| +y | 2 | +z |
//| -y | 3 | -z |
//| +z | 4 | -y |
//| -z | 5 | -y |
//
// Remember, we use upside down viewport.
//
//*/
//
// void
// TextureCube::Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isMipMapped, cstr name)
//{
// WARN_IF(!IsPowerOfTwo(cubeSide), "Image Cube {1} has side {0}x{0} (Non Power of Two)", cubeSide,
// name ? name : "<unnamed>");
//
// const u8 mipLevels = isMipMapped ? 1 + static_cast<u8>(floor(log2(cubeSide))) : 1;
//
// auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
// if (isMipMapped)
// {
// usage |= vk::ImageUsageFlagBits::eTransferSrc;
// }
//
// const vk::Extent3D extent = {.width = cubeSide, .height = cubeSide, .depth = 1};
//
// vk::ImageCreateInfo imageCreateInfo = {
// .flags = vk::ImageCreateFlagBits::eCubeCompatible,
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = extent,
// .mipLevels = mipLevels,
// .arrayLayers = 6,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = usage,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = {},
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
//
// vk::ImageView view;
// vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::eCube,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = mipLevels,
// .baseArrayLayer = 0,
// .layerCount = 6,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = extent;
// m_MipLevels = mipLevels;
// m_LayerCount = 6;
//
// device->SetName(m_Image, name);
// }
//
// void
// AttachmentImage::Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, cstr name)
//{
// vk::ImageCreateInfo imageCreateInfo = {
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = ToExtent3D(extent, 1),
// .mipLevels = 1,
// .arrayLayers = 1,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate depth buffer. Cause: {}", result) THEN_ABORT(result);
//
// vk::ImageView view;
// vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::e2D,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = 1,
// .baseArrayLayer = 0,
// .layerCount = 1,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create attachment image view {}. Cause: {}", name, result)
// THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_MipLevels = 1;
// m_LayerCount = 1;
//
// device->SetName(m_Image, name);
// }
//
// void
// DepthImage::Init(const Device *device, vk::Extent2D extent, cstr name)
//{
// constexpr vk::Format imageFormat = vk::Format::eD24UnormS8Uint;
// vk::ImageCreateInfo imageCreateInfo = {
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = ToExtent3D(extent, 1),
// .mipLevels = 1,
// .arrayLayers = 1,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = vk::ImageUsageFlagBits::eDepthStencilAttachment,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate depth buffer. Cause: {}", result) THEN_ABORT(result);
//
// vk::ImageView view;
// vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::e2D,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eDepth,
// .baseMipLevel = 0,
// .levelCount = 1,
// .baseArrayLayer = 0,
// .layerCount = 1,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create depth image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_MipLevels = 1;
// m_LayerCount = 1;
//
// device->SetName(m_Image, name);
// }
//
// void
// StorageTexture::Init(const Device *device, vk::Extent2D extent, const vk::Format imageFormat, const bool isSampled,
// cstr name)
//{
// // Reasoning:
// // Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
// // results.
// auto usage =
// vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc |
// vk::ImageUsageFlagBits::eTransferDst;
// if (isSampled)
// {
// WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.width), "Image {2} is {0}x{1} (Non Power of
// Two)",
// extent.width, extent.height, name ? name : "<unnamed>");
// usage |= vk::ImageUsageFlagBits::eSampled;
// }
//
// vk::ImageCreateInfo imageCreateInfo = {
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = ToExtent3D(extent, 1),
// .mipLevels = 1,
// .arrayLayers = 1,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = usage,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = {},
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
//
// vk::ImageView view;
// const vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::e2D,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = 1,
// .baseArrayLayer = 0,
// .layerCount = 1,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_MipLevels = 1;
// m_LayerCount = 1;
//
// device->SetName(m_Image, name);
// }
//
// void
// StorageTextureCube::Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isSampled, bool
// isMipMapped,
// cstr name)
//{
// // Reasoning:
// // Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
// // results.
// auto usage =
// vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc |
// vk::ImageUsageFlagBits::eTransferDst;
// if (isSampled)
// {
// WARN_IF(!IsPowerOfTwo(cubeSide), "Image {1} is {0}x{0} (Non Power of Two)", cubeSide,
// name ? name : "<unnamed>");
// usage |= vk::ImageUsageFlagBits::eSampled;
// }
//
// const u8 mipLevels = isMipMapped ? 1 + static_cast<u8>(floor(log2(cubeSide))) : 1;
//
// vk::ImageCreateInfo imageCreateInfo = {
// .flags = vk::ImageCreateFlagBits::eCubeCompatible,
// .imageType = vk::ImageType::e2D,
// .format = imageFormat,
// .extent = {cubeSide, cubeSide, 1},
// .mipLevels = mipLevels,
// .arrayLayers = 6,
// .samples = vk::SampleCountFlagBits::e1,
// .tiling = vk::ImageTiling::eOptimal,
// .usage = usage,
// .sharingMode = vk::SharingMode::eExclusive,
// .initialLayout = vk::ImageLayout::eUndefined,
// };
// constexpr VmaAllocationCreateInfo allocationCreateInfo = {
// .flags = {},
// .usage = VMA_MEMORY_USAGE_AUTO,
// };
//
// VkImage image;
// VmaAllocation allocation;
// auto result = static_cast<vk::Result>(vmaCreateImage(device->m_Allocator, reinterpret_cast<VkImageCreateInfo
// *>(&imageCreateInfo),
// &allocationCreateInfo, &image, &allocation, nullptr));
// ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
//
// vk::ImageView view;
// const vk::ImageViewCreateInfo imageViewCreateInfo = {
// .image = image,
// .viewType = vk::ImageViewType::eCube,
// .format = imageFormat,
// .components = {},
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = mipLevels,
// .baseArrayLayer = 0,
// .layerCount = 6,
// },
// };
// result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
// ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
//
// m_Device = device;
// m_Image = image;
// m_View = view;
// m_Allocation = allocation;
// m_Extent = imageCreateInfo.extent;
// m_MipLevels = mipLevels;
// m_LayerCount = 6;
//
// device->SetName(m_Image, name);
// }
Image::Image(Image &&other) noexcept
: m_Device{Take(other.m_Device)}
, m_Image{Take(other.m_Image)}
, m_Allocation{Take(other.m_Allocation)}
, m_Extent{other.m_Extent}
, m_Format{other.m_Format}
, m_EmptyPadding_{other.m_EmptyPadding_}
, m_Flags_{other.m_Flags_}
, m_LayerCount{other.m_LayerCount}
, m_MipLevels{other.m_MipLevels}
/*
Cube map Faces info.
TODO: Correct this based on the actual layout for upside down viewport.
| Axis | Layer | Up |
|:----:|:-----:|:--:|
| +x | 0 | -y |
| -x | 1 | -y |
| +y | 2 | +z |
| -y | 3 | -z |
| +z | 4 | -y |
| -z | 5 | -y |
Remember, we use upside down viewport.
*/
void
TextureCube::Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isMipMapped, cstr name)
{
WARN_IF(!IsPowerOfTwo(cubeSide), "Image Cube {1} has side {0}x{0} (Non Power of Two)", cubeSide, name ? name : "<unnamed>");
const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(cubeSide))) : 1;
auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
if (isMipMapped)
{
usage |= vk::ImageUsageFlagBits::eTransferSrc;
}
const vk::Extent3D extent = {.width = cubeSide, .height = cubeSide, .depth = 1};
vk::ImageCreateInfo imageCreateInfo = {
.flags = vk::ImageCreateFlagBits::eCubeCompatible,
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = extent,
.mipLevels = mipLevels,
.arrayLayers = 6,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = usage,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
vk::ImageView view;
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::eCube,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 6,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = extent;
m_MipLevels = mipLevels;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 6;
device->SetName(m_Image, name);
}
Image::Image(Device const *device, vk::Image const image, VmaAllocation const allocation, vk::Extent3D const extent,
vk::Format const format, Flags const flags, u8 const layerCount, u8 const mipLevels)
: m_Device{device}
, m_Image{image}
, m_Allocation{allocation}
, m_Extent{extent}
, m_Format{format}
, m_Flags_{flags}
, m_LayerCount{layerCount}
, m_MipLevels{mipLevels}
void
AttachmentImage::Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, cstr name)
{
vk::ImageCreateInfo imageCreateInfo = {
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = ToExtent3D(extent, 1),
.mipLevels = 1,
.arrayLayers = 1,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate depth buffer. Cause: {}", result) THEN_ABORT(result);
vk::ImageView view;
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create attachment image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_MipLevels = 1;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 1;
device->SetName(m_Image, name);
}
void
DepthImage::Init(const Device *device, vk::Extent2D extent, cstr name)
{
constexpr vk::Format imageFormat = vk::Format::eD24UnormS8Uint;
vk::ImageCreateInfo imageCreateInfo = {
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = ToExtent3D(extent, 1),
.mipLevels = 1,
.arrayLayers = 1,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = vk::ImageUsageFlagBits::eDepthStencilAttachment,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate depth buffer. Cause: {}", result) THEN_ABORT(result);
vk::ImageView view;
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eDepth,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create depth image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_MipLevels = 1;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 1;
device->SetName(m_Image, name);
}
void
StorageTexture::Init(const Device *device, vk::Extent2D extent, const vk::Format imageFormat, const bool isSampled,
                     cstr name)
{
    // Creates a single-mip, single-layer 2D storage image (optionally sampled)
    // and its view via VMA, storing the handles on this texture. Aborts on failure.
    //
    // Reasoning:
    // Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
    // results.
    auto usage =
        vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
    if (isSampled)
    {
        // BUGFIX: the second operand previously re-tested extent.width, so a
        // non-power-of-two height was never warned about even though the
        // message prints {0}x{1} (width x height).
        WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.height), "Image {2} is {0}x{1} (Non Power of Two)",
                extent.width, extent.height, name ? name : "<unnamed>");
        usage |= vk::ImageUsageFlagBits::eSampled;
    }

    vk::ImageCreateInfo imageCreateInfo = {
        .imageType = vk::ImageType::e2D,
        .format = imageFormat,
        .extent = ToExtent3D(extent, 1),
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = vk::SampleCountFlagBits::e1,
        .tiling = vk::ImageTiling::eOptimal,
        .usage = usage,
        .sharingMode = vk::SharingMode::eExclusive,
        .initialLayout = vk::ImageLayout::eUndefined,
    };
    constexpr VmaAllocationCreateInfo allocationCreateInfo = {
        .flags = {},
        .usage = VMA_MEMORY_USAGE_AUTO,
    };

    VkImage image;
    VmaAllocation allocation;
    auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
                                                  &allocationCreateInfo, &image, &allocation, nullptr));
    ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);

    vk::ImageView view;
    const vk::ImageViewCreateInfo imageViewCreateInfo = {
        .image = image,
        .viewType = vk::ImageViewType::e2D,
        .format = imageFormat,
        .components = {},
        .subresourceRange =
            {
                .aspectMask = vk::ImageAspectFlagBits::eColor,
                .baseMipLevel = 0,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
    };
    result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
    ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);

    m_Image = image;
    m_View = view;
    m_Allocation = allocation;
    m_Extent = imageCreateInfo.extent;
    m_MipLevels = 1;
    m_Flags_ = OWNED_BIT | VALID_BIT;
    m_LayerCount = 1;

    device->SetName(m_Image, name);
}
void
StorageTextureCube::Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isSampled, bool isMipMapped,
cstr name)
{
// Reasoning:
// Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
// results.
auto usage =
vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
if (isSampled)
{
WARN_IF(!IsPowerOfTwo(cubeSide), "Image {1} is {0}x{0} (Non Power of Two)", cubeSide,
name ? name : "<unnamed>");
usage |= vk::ImageUsageFlagBits::eSampled;
}
const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(cubeSide))) : 1;
vk::ImageCreateInfo imageCreateInfo = {
.flags = vk::ImageCreateFlagBits::eCubeCompatible,
.imageType = vk::ImageType::e2D,
.format = imageFormat,
.extent = {cubeSide, cubeSide, 1},
.mipLevels = mipLevels,
.arrayLayers = 6,
.samples = vk::SampleCountFlagBits::e1,
.tiling = vk::ImageTiling::eOptimal,
.usage = usage,
.sharingMode = vk::SharingMode::eExclusive,
.initialLayout = vk::ImageLayout::eUndefined,
};
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::eCube,
.format = imageFormat,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = mipLevels,
.baseArrayLayer = 0,
.layerCount = 6,
},
};
result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);
m_Image = image;
m_View = view;
m_Allocation = allocation;
m_Extent = imageCreateInfo.extent;
m_MipLevels = mipLevels;
m_Flags_ = OWNED_BIT | VALID_BIT;
m_LayerCount = 6;
device->SetName(m_Image, name);
}

View File

@ -1,15 +1,15 @@
// =============================================
// Aster: physical_device.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/physical_device.h"
#include "core/instance.h"
#include "core/context.h"
#include "core/surface.h"
[[nodiscard]] vk::SurfaceCapabilitiesKHR
GetSurfaceCapabilities(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR const surface)
GetSurfaceCapabilities(const vk::PhysicalDevice physicalDevice, const vk::SurfaceKHR surface)
{
vk::SurfaceCapabilitiesKHR surfaceCapabilities;
@ -21,7 +21,7 @@ GetSurfaceCapabilities(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR c
}
[[nodiscard]] eastl::vector<vk::SurfaceFormatKHR>
GetSurfaceFormats(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR const surface)
GetSurfaceFormats(const vk::PhysicalDevice physicalDevice, const vk::SurfaceKHR surface)
{
// vk::Result::eIncomplete should not occur in this function. The rest are errors. Thus, abort is allowed.
u32 count = 0;
@ -38,7 +38,7 @@ GetSurfaceFormats(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR const
}
[[nodiscard]] eastl::vector<vk::PresentModeKHR>
GetSurfacePresentModes(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR const surface)
GetSurfacePresentModes(const vk::PhysicalDevice physicalDevice, const vk::SurfaceKHR surface)
{
// vk::Result::eIncomplete should not occur in this function. The rest are errors. Thus, abort is allowed.
u32 count = 0;
@ -55,11 +55,11 @@ GetSurfacePresentModes(vk::PhysicalDevice const physicalDevice, vk::SurfaceKHR c
}
[[nodiscard]] bool
GetQueuePresentSupport(u32 const queueFamilyIndex, vk::SurfaceKHR const surface,
vk::PhysicalDevice const physicalDevice)
GetQueuePresentSupport(const u32 queueFamilyIndex, const vk::SurfaceKHR surface,
const vk::PhysicalDevice physicalDevice)
{
b32 supported = false;
vk::Result const result = physicalDevice.getSurfaceSupportKHR(queueFamilyIndex, surface, &supported);
const vk::Result result = physicalDevice.getSurfaceSupportKHR(queueFamilyIndex, surface, &supported);
ERROR_IF(Failed(result), "Could not get queue family surface support. Cause: {}", result)
THEN_ABORT(result);
@ -67,7 +67,7 @@ GetQueuePresentSupport(u32 const queueFamilyIndex, vk::SurfaceKHR const surface,
}
[[nodiscard]] eastl::fixed_vector<vk::QueueFamilyProperties, 32>
GetQueueFamilyProperties(vk::PhysicalDevice const physicalDevice)
GetQueueFamilyProperties(const vk::PhysicalDevice physicalDevice)
{
// Devices rarely have more than 32 queue families. Thus fixed vector
u32 count = 0;
@ -81,7 +81,7 @@ GetQueueFamilyProperties(vk::PhysicalDevice const physicalDevice)
// Size 384 return.
[[nodiscard]] eastl::vector<QueueFamilyInfo>
GetQueueFamilies(vk::SurfaceKHR const surface, vk::PhysicalDevice const physicalDevice)
GetQueueFamilies(const vk::SurfaceKHR surface, const vk::PhysicalDevice physicalDevice)
{
auto queueFamilyProperties = GetQueueFamilyProperties(physicalDevice);
@ -126,7 +126,7 @@ GetQueueFamilies(vk::SurfaceKHR const surface, vk::PhysicalDevice const physical
return queueFamilyInfos;
}
PhysicalDevice::PhysicalDevice(vk::SurfaceKHR const surface, vk::PhysicalDevice const physicalDevice)
PhysicalDevice::PhysicalDevice(const vk::SurfaceKHR surface, const vk::PhysicalDevice physicalDevice)
{
physicalDevice.getProperties(&m_DeviceProperties);
physicalDevice.getFeatures(&m_DeviceFeatures);
@ -139,7 +139,7 @@ PhysicalDevice::PhysicalDevice(vk::SurfaceKHR const surface, vk::PhysicalDevice
}
eastl::fixed_vector<vk::PhysicalDevice, 8>
EnumeratePhysicalDevices(vk::Instance const instance)
EnumeratePhysicalDevices(const vk::Instance instance)
{
u32 count = 0;
vk::Result result = instance.enumeratePhysicalDevices(&count, nullptr);
@ -154,10 +154,11 @@ EnumeratePhysicalDevices(vk::Instance const instance)
return physicalDevices;
}
PhysicalDevices::PhysicalDevices(Surface const &surface, Instance const &context)
PhysicalDevices::PhysicalDevices(const Surface *surface, const Context *context)
{
for (auto physicalDevices = EnumeratePhysicalDevices(context.m_Instance); auto physicalDevice : physicalDevices)
auto physicalDevices = EnumeratePhysicalDevices(context->m_Instance);
for (auto physicalDevice : physicalDevices)
{
this->emplace_back(surface.m_Surface, physicalDevice);
this->emplace_back(surface->m_Surface, physicalDevice);
}
}

View File

@ -1,28 +1,24 @@
// =============================================
// Aster: pipeline.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/pipeline.h"
#include "core/device.h"
Pipeline::Pipeline(Device const *device, vk::PipelineLayout const layout, vk::Pipeline const pipeline,
eastl::vector<vk::DescriptorSetLayout> &&setLayouts, Kind const kind)
: m_Device{device}
, m_Layout{layout}
, m_Pipeline{pipeline}
, m_SetLayouts{std::move(setLayouts)}
, m_Kind{kind}
Pipeline::Pipeline(const Device *device, vk::PipelineLayout layout, vk::Pipeline pipeline,
eastl::vector<vk::DescriptorSetLayout> &&setLayouts)
: m_Device(device)
, m_Layout(layout)
, m_Pipeline(pipeline)
, m_SetLayouts(std::move(setLayouts))
{
}
Pipeline::~Pipeline()
{
if (!m_Device || !m_Pipeline)
return;
for (auto const setLayout : m_SetLayouts)
for (const auto setLayout : m_SetLayouts)
{
m_Device->m_Device.destroy(setLayout, nullptr);
}

View File

@ -1,40 +0,0 @@
// =============================================
// Aster: sampler.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "core/sampler.h"
#include "core/device.h"
Sampler::~Sampler()
{
if (!IsValid())
return;
m_Device->m_Device.destroy(Take(m_Sampler), nullptr);
}
Sampler::Sampler(Device const *device, vk::SamplerCreateInfo const &samplerCreateInfo, cstr name)
{
m_Device = device;
auto const result = device->m_Device.createSampler(&samplerCreateInfo, nullptr, &m_Sampler);
ERROR_IF(Failed(result), "Could not create a sampler {}", name ? name : "<unnamed>") THEN_ABORT(-1);
}
Sampler &
Sampler::operator=(Sampler &&other) noexcept
{
if (this == &other)
return *this;
using std::swap;
swap(m_Device, other.m_Device);
swap(m_Sampler, other.m_Sampler);
return *this;
}
Sampler::Sampler(Sampler &&other) noexcept
: m_Device{other.m_Device}
, m_Sampler{Take(other.m_Sampler)}
{
}

View File

@ -1,19 +1,20 @@
// =============================================
// Aster: surface.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/surface.h"
#include "core/instance.h"
#include "core/context.h"
#include "core/window.h"
Surface::Surface(Instance &context, Window const &window)
: m_Context(&context)
Surface::Surface(Context *context, const Window *window, cstr name)
: m_Context(context)
, m_Name(name)
{
VkSurfaceKHR surface;
auto result = static_cast<vk::Result>(
glfwCreateWindowSurface(static_cast<VkInstance>(m_Context->m_Instance), window.m_Window, nullptr, &surface));
auto result = Cast<vk::Result>(
glfwCreateWindowSurface(Cast<VkInstance>(m_Context->m_Instance), window->m_Window, nullptr, &surface));
ERROR_IF(Failed(result), "Failed to create Surface with {}", result)
THEN_ABORT(result)
ELSE_DEBUG("Surface {} Created", m_Name);
@ -22,14 +23,14 @@ Surface::Surface(Instance &context, Window const &window)
Surface::~Surface()
{
if (!m_Context || !m_Context->m_Instance || !m_Surface)
return;
if (m_Context && m_Surface)
{
m_Context->m_Instance.destroy(m_Surface, nullptr);
DEBUG("Surface Destroyed");
m_Surface = nullptr;
m_Context = nullptr;
}
}
Surface::Surface(Surface &&other) noexcept

View File

@ -1,6 +1,6 @@
/// =============================================
// Aster: swapchain.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// ==============================================
#include "core/swapchain.h"
@ -11,8 +11,9 @@
[[nodiscard]] vk::Extent2D GetExtent(Size2D size, vk::SurfaceCapabilitiesKHR *surfaceCapabilities);
Swapchain::Swapchain(Surface const &surface, Device const &device, Size2D size)
: m_Device(&device)
Swapchain::Swapchain(const Surface *surface, const Device *device, Size2D size, NameString &&name)
: m_Device(device)
, m_Name(std::move(name))
, m_Format(vk::Format::eUndefined)
{
this->Create(surface, size);
@ -26,11 +27,11 @@ Swapchain::~Swapchain()
Swapchain::Swapchain(Swapchain &&other) noexcept
: m_Device(other.m_Device)
, m_Swapchain(Take(other.m_Swapchain))
, m_Name(std::move(other.m_Name))
, m_Extent(other.m_Extent)
, m_Format(other.m_Format)
, m_Images(std::move(other.m_Images))
, m_ImageViews(std::move(other.m_ImageViews))
, m_ResizeCallbacks(std::move(other.m_ResizeCallbacks))
{
}
@ -41,32 +42,32 @@ Swapchain::operator=(Swapchain &&other) noexcept
return *this;
m_Device = other.m_Device;
m_Swapchain = Take(other.m_Swapchain);
m_Name = std::move(other.m_Name);
m_Extent = other.m_Extent;
m_Format = other.m_Format;
m_Images = std::move(other.m_Images);
m_ImageViews = std::move(other.m_ImageViews);
m_ResizeCallbacks = std::move(other.m_ResizeCallbacks);
return *this;
}
void
Swapchain::Create(Surface const &surface, Size2D size)
Swapchain::Create(const Surface *surface, Size2D size)
{
auto surfaceCapabilities = GetSurfaceCapabilities(m_Device->m_PhysicalDevice, surface.m_Surface);
auto surfaceCapabilities = GetSurfaceCapabilities(m_Device->m_PhysicalDevice, surface->m_Surface);
m_Extent = GetExtent(size, &surfaceCapabilities);
while (m_Extent.width == 0 || m_Extent.height == 0)
{
glfwWaitEvents();
surfaceCapabilities = GetSurfaceCapabilities(m_Device->m_PhysicalDevice, surface.m_Surface);
surfaceCapabilities = GetSurfaceCapabilities(m_Device->m_PhysicalDevice, surface->m_Surface);
m_Extent = GetExtent(size, &surfaceCapabilities);
}
auto surfaceFormats = GetSurfaceFormats(m_Device->m_PhysicalDevice, surface.m_Surface);
auto presentModes = GetSurfacePresentModes(m_Device->m_PhysicalDevice, surface.m_Surface);
auto surfaceFormats = GetSurfaceFormats(m_Device->m_PhysicalDevice, surface->m_Surface);
auto presentModes = GetSurfacePresentModes(m_Device->m_PhysicalDevice, surface->m_Surface);
m_Format = vk::Format::eUndefined;
auto swapchainColorSpace = vk::ColorSpaceKHR::eSrgbNonlinear;
vk::ColorSpaceKHR swapchainColorSpace = vk::ColorSpaceKHR::eSrgbNonlinear;
for (auto [format, colorSpace] : surfaceFormats)
{
if (format == vk::Format::eB8G8R8A8Srgb && colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear)
@ -83,8 +84,8 @@ Swapchain::Create(Surface const &surface, Size2D size)
swapchainColorSpace = colorSpace;
}
auto swapchainPresentMode = vk::PresentModeKHR::eFifo;
for (auto const presentMode : presentModes)
vk::PresentModeKHR swapchainPresentMode = vk::PresentModeKHR::eFifo;
for (const auto presentMode : presentModes)
{
if (presentMode == vk::PresentModeKHR::eMailbox)
{
@ -94,14 +95,16 @@ Swapchain::Create(Surface const &surface, Size2D size)
}
u32 swapchainImageCount = 3;
u32 maxImageCount =
glm::max(swapchainImageCount, glm::max(surfaceCapabilities.maxImageCount, surfaceCapabilities.minImageCount));
swapchainImageCount = glm::clamp(swapchainImageCount, surfaceCapabilities.minImageCount, maxImageCount);
if (surfaceCapabilities.maxImageCount > 0)
{
swapchainImageCount =
glm::clamp(swapchainImageCount, surfaceCapabilities.minImageCount, surfaceCapabilities.maxImageCount);
}
// TODO: Note that different queues might need the images to be shared.
vk::SwapchainCreateInfoKHR const swapchainCreateInfo = {
.surface = surface.m_Surface,
const vk::SwapchainCreateInfoKHR swapchainCreateInfo = {
.surface = surface->m_Surface,
.minImageCount = swapchainImageCount,
.imageFormat = m_Format,
.imageColorSpace = swapchainColorSpace,
@ -117,30 +120,28 @@ Swapchain::Create(Surface const &surface, Size2D size)
};
vk::Device device = m_Device->m_Device;
NameString name = "Swapchain of ";
name += m_Device->m_Name;
vk::SwapchainKHR swapchain;
vk::Result result = device.createSwapchainKHR(&swapchainCreateInfo, nullptr, &swapchain);
ERROR_IF(Failed(result), "'{}' creation failed. Cause {}", name, result)
ERROR_IF(Failed(result), "Swapchain {} creation failed. Cause {}", m_Name, result)
THEN_ABORT(result)
ELSE_DEBUG("Created '{}'", name);
ELSE_DEBUG("Created Swapchain '{}'", m_Name);
// Irrelevant on the first run. Required for re-creation.
Cleanup();
m_Swapchain = swapchain;
m_Device->SetName(m_Swapchain, m_Device->m_Name.data());
m_Device->SetName(m_Swapchain, m_Name.data());
result = device.getSwapchainImagesKHR(m_Swapchain, &swapchainImageCount, nullptr);
ERROR_IF(Failed(result), "Failed getting {}'s images. Cause {}", name, result)
ERROR_IF(Failed(result), "Failed getting swapchain {}'s images. Cause {}", m_Name, result)
THEN_ABORT(result);
// Managed by the Swapchain.
m_Images.resize(swapchainImageCount, nullptr);
m_Images.resize(swapchainImageCount);
result = device.getSwapchainImagesKHR(m_Swapchain, &swapchainImageCount, m_Images.data());
ERROR_IF(Failed(result), "Failed getting {}'s images. Cause {}", name, result)
ERROR_IF(Failed(result), "Failed getting swapchain {}'s images. Cause {}", m_Name, result)
THEN_ABORT(result);
vk::ImageViewCreateInfo viewCreateInfo = {
@ -164,7 +165,7 @@ Swapchain::Create(Surface const &surface, Size2D size)
vk::ImageView imageView;
result = device.createImageView(&viewCreateInfo, nullptr, &imageView);
ERROR_IF(Failed(result), "Failed creating {}'s image view [{}]. Cause {}", name, index, result)
ERROR_IF(Failed(result), "Failed creating swapchain {}'s image view [{}]. Cause {}", m_Name, index, result)
THEN_ABORT(result);
m_ImageViews.push_back(imageView);
@ -172,7 +173,7 @@ Swapchain::Create(Surface const &surface, Size2D size)
++index;
}
DEBUG("{} Image Views created.", name);
DEBUG("Swapchain {} Image Views created.", m_Name);
for (auto &callback : m_ResizeCallbacks)
{
@ -183,31 +184,24 @@ Swapchain::Create(Surface const &surface, Size2D size)
void
Swapchain::RegisterResizeCallback(FnResizeCallback &&callback)
{
m_ResizeCallbacks.emplace_back(std::move(callback));
m_ResizeCallbacks.emplace_back(callback);
}
void
Swapchain::Cleanup()
{
if (!m_Swapchain)
return;
NameString name = "Swapchain of ";
name += m_Device->m_Name;
for (auto const imageView : m_ImageViews)
if (!m_ImageViews.empty()) // Don't want the condition in the logs.
DEBUG("Swapchain {} Image Views destroyed.", m_Name);
for (const auto imageView : m_ImageViews)
{
m_Device->m_Device.destroy(imageView, nullptr);
}
if (!m_ImageViews.empty()) // Don't want the condition in the logs.
DEBUG("Swapchain {} Image Views destroyed.", name);
m_ImageViews.clear();
m_Images.clear();
if (m_Swapchain)
{
m_Device->m_Device.destroy(m_Swapchain, nullptr);
m_Swapchain = nullptr;
DEBUG("Swapchain '{}' destroyed.", name);
DEBUG("Swapchain '{}' destroyed.", m_Name);
}
}

View File

@ -1,40 +1,16 @@
// =============================================
// Aster: window.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "core/window.h"
#include "core/instance.h"
#include "core/context.h"
#include "util/logger.h"
std::atomic_uint64_t Window::m_WindowCount = 0;
std::atomic_bool Window::m_IsGlfwInit = false;
void
Window::SetupLibrary()
{
if (!m_IsGlfwInit)
{
if (!glfwInit())
{
char const *error = nullptr;
auto const code = glfwGetError(&error);
ERROR("GLFW Init failed. Cause: ({}) {}", code, error)
THEN_ABORT(code);
}
m_WindowCount = 0;
m_IsGlfwInit = true;
}
}
cstr *
Window::GetInstanceExtensions(u32 *extensionCount)
{
SetupLibrary();
return glfwGetRequiredInstanceExtensions(extensionCount);
}
void
Window::RequestExit() const noexcept
{
@ -42,15 +18,15 @@ Window::RequestExit() const noexcept
}
void
Window::SetWindowSize(vk::Extent2D const &extent) const noexcept
Window::SetWindowSize(const vk::Extent2D &extent) const noexcept
{
SetWindowSize(extent.width, extent.height);
}
void
Window::SetWindowSize(u32 const width, u32 const height) const noexcept
Window::SetWindowSize(const u32 width, const u32 height) const noexcept
{
glfwSetWindowSize(m_Window, static_cast<i32>(width), static_cast<i32>(height));
glfwSetWindowSize(m_Window, Cast<i32>(width), Cast<i32>(height));
}
Size2D
@ -59,14 +35,25 @@ Window::GetSize() const
int width;
int height;
glfwGetFramebufferSize(m_Window, &width, &height);
return {static_cast<u32>(width), static_cast<u32>(height)};
return {Cast<u32>(width), Cast<u32>(height)};
}
Window::Window(cstr const title, Size2D extent, b8 const isFullScreen)
Window::Window(const cstr title, Size2D extent, const b8 isFullScreen)
{
m_Name = title;
SetupLibrary();
if (!m_IsGlfwInit)
{
if (!glfwInit())
{
const char *error = nullptr;
const auto code = glfwGetError(&error);
ERROR("GLFW Init failed. Cause: ({}) {}", code, error)
THEN_ABORT(code);
}
m_WindowCount = 0;
m_IsGlfwInit = true;
}
GLFWmonitor *monitor = glfwGetPrimaryMonitor();
ERROR_IF(!monitor, "No monitor found");
@ -77,22 +64,22 @@ Window::Window(cstr const title, Size2D extent, b8 const isFullScreen)
glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
glfwWindowHint(GLFW_CENTER_CURSOR, GLFW_TRUE);
m_Window = glfwCreateWindow(static_cast<i32>(extent.m_Width), static_cast<i32>(extent.m_Height), m_Name.c_str(),
m_Window = glfwCreateWindow(Cast<i32>(extent.m_Width), Cast<i32>(extent.m_Height), m_Name.c_str(),
isFullScreen ? monitor : nullptr, nullptr);
ERROR_IF(m_Window == nullptr, "Window creation failed")
ELSE_DEBUG("Window '{}' created with resolution '{}x{}'", m_Name, extent.m_Width, extent.m_Height);
if (m_Window == nullptr)
{
char const *error = nullptr;
auto const code = glfwGetError(&error);
const char *error = nullptr;
const auto code = glfwGetError(&error);
ERROR("GLFW Window Creation failed. Cause: ({}) {}", code, error)
THEN_ABORT(code);
}
if (isFullScreen == false)
{
glfwSetWindowPos(m_Window, static_cast<i32>(windowWidth - extent.m_Width) / 2,
static_cast<i32>(windowHeight - extent.m_Height) / 2);
glfwSetWindowPos(m_Window, Cast<i32>(windowWidth - extent.m_Width) / 2,
Cast<i32>(windowHeight - extent.m_Height) / 2);
}
glfwSetInputMode(m_Window, GLFW_CURSOR, GLFW_CURSOR_NORMAL);
@ -109,7 +96,7 @@ Window::~Window()
--m_WindowCount;
}
if (m_WindowCount == 0 && m_IsGlfwInit)
if (m_WindowCount== 0 && m_IsGlfwInit)
{
glfwTerminate();
m_IsGlfwInit = false;

View File

@ -4,8 +4,7 @@ cmake_minimum_required(VERSION 3.13)
target_sources(aster_core
PRIVATE
"rendering_device.cpp"
"commit_manager.cpp"
"pipeline_helpers.cpp"
"context.cpp"
"sync_server.cpp")
"manager.cpp"
"buffer_manager.cpp"
"image_manager.cpp"
"render_resource_manager.cpp")

View File

@ -0,0 +1,48 @@
// =============================================
// Aster: buffer_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/buffer_manager.h"
Manager<Buffer> *Manager<Buffer>::m_Instance = nullptr;
using namespace systems;
BufferHandle
BufferManager::CreateStorageBuffer(const usize size, const cstr name)
{
auto [handle, object] = Alloc();
// TODO: Storage and Index buffer are set.
// This is hacky and should be improved.
constexpr vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eIndexBuffer |
vk::BufferUsageFlagBits::eShaderDeviceAddress;
constexpr VmaAllocationCreateFlags createFlags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
object->Allocate(m_Device, size, usage, createFlags, memoryUsage, name);
return std::move(handle);
}
Manager<Buffer>::Handle
BufferManager::CreateUniformBuffer(const usize size, const cstr name)
{
auto [handle, object] = Alloc();
constexpr vk::BufferUsageFlags usage = vk::BufferUsageFlagBits::eUniformBuffer;
constexpr VmaAllocationCreateFlags createFlags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
object->Allocate(m_Device, size, usage, createFlags, memoryUsage, name);
return std::move(handle);
}
BufferManager::BufferManager(const Device *device, const u32 maxCount, const u8 binding)
: Manager{device, maxCount, binding}
{
}

View File

@ -1,239 +0,0 @@
// =============================================
// Aster: render_resource_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/commit_manager.h"
#include "EASTL/array.h"
#include "core/device.h"
#include "core/image_view.h"
#include "systems/rendering_device.h"
using namespace systems;
CommitManager *CommitManager::m_Instance = nullptr;
CommitManager::CommitManager(RenderingDevice const *device, u32 const maxBuffers, u32 const maxImages,
u32 const maxStorageImages, Ref<Sampler> defaultSampler)
: m_Device{device}
, m_Buffers{maxBuffers}
, m_Images{maxImages}
, m_StorageImages{maxStorageImages}
, m_DefaultSampler{std::move(defaultSampler)}
{
assert(!m_Instance);
eastl::array poolSizes = {
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageBuffer,
.descriptorCount = maxBuffers,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = maxImages,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageImage,
.descriptorCount = maxStorageImages,
},
};
vk::DescriptorPoolCreateInfo const poolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
.maxSets = 1,
.poolSizeCount = static_cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
AbortIfFailed(device->m_Device->createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));
eastl::array descriptorLayoutBindings = {
vk::DescriptorSetLayoutBinding{
.binding = BUFFER_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.descriptorCount = static_cast<u32>(maxBuffers),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = IMAGE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = static_cast<u32>(maxImages),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
vk::DescriptorSetLayoutBinding{
.binding = STORAGE_IMAGE_BINDING_INDEX,
.descriptorType = vk::DescriptorType::eStorageImage,
.descriptorCount = static_cast<u32>(maxStorageImages),
.stageFlags = vk::ShaderStageFlagBits::eAll,
},
};
vk::DescriptorBindingFlags bindingFlags =
vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
eastl::array<vk::DescriptorBindingFlags, descriptorLayoutBindings.size()> layoutBindingFlags;
layoutBindingFlags.fill(bindingFlags);
vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
.bindingCount = static_cast<u32>(layoutBindingFlags.size()),
.pBindingFlags = layoutBindingFlags.data(),
};
static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
vk::DescriptorSetLayoutCreateInfo const descriptorSetLayoutCreateInfo = {
.pNext = &bindingFlagsCreateInfo,
.flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
.bindingCount = static_cast<u32>(descriptorLayoutBindings.size()),
.pBindings = descriptorLayoutBindings.data(),
};
AbortIfFailed(device->m_Device->createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));
// One descriptor is enough. Updating it at any time is safe. (Update until submit, data held when pending)
// https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
// https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
vk::DescriptorSetAllocateInfo const descriptorSetAllocateInfo = {
.descriptorPool = m_DescriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &m_SetLayout,
};
AbortIfFailed(device->m_Device->allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));
device->SetName(m_SetLayout, "Bindless Layout");
device->SetName(m_DescriptorPool, "Bindless Pool");
device->SetName(m_DescriptorSet, "Bindless Set");
m_Instance = this;
}
CommitManager::~CommitManager()
{
m_Device->m_Device->destroy(m_SetLayout, nullptr);
m_Device->m_Device->destroy(m_DescriptorPool, nullptr);
#if !defined(ASTER_NDEBUG)
u32 bufferCount = 0;
for (auto const &entry : m_Buffers.m_Data)
{
bufferCount += entry.m_CommitCount;
}
u32 imageCount = 0;
for (auto const &entry : m_Images.m_Data)
{
imageCount += entry.m_CommitCount;
}
if (bufferCount > 0 || imageCount > 0)
{
WARN("Committed resources at destruction. Buffers: {}, Images: {}", bufferCount, imageCount);
}
#endif
}
ResId<Buffer>
CommitManager::CommitBuffer(Ref<Buffer> const &buffer)
{
auto [commit, isNew] = m_Buffers.Create(buffer);
if (!isNew)
return commit;
m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
.buffer = buffer->m_Buffer,
.offset = 0,
.range = buffer->m_Size,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = BUFFER_BINDING_INDEX,
.dstArrayElement = commit.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.pBufferInfo = &m_WriteInfos.back().uBufferInfo,
});
return commit;
}
ResId<StorageImageView>
CommitManager::CommitStorageImage(Ref<StorageImageView> const &image)
{
auto [commit, isNew] = m_StorageImages.Create(image);
if (!isNew)
return commit;
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = nullptr,
.imageView = image->m_View,
.imageLayout = vk::ImageLayout::eGeneral,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = STORAGE_IMAGE_BINDING_INDEX,
.dstArrayElement = commit.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageImage,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
return commit;
}
ResId<TextureView>
CommitManager::CommitTexture(Ref<TextureView> const &handle)
{
return CommitTexture(handle, m_DefaultSampler);
}
ResId<TextureView>
CommitManager::CommitTexture(Ref<TextureView> const &image, Ref<Sampler> const &sampler)
{
auto [commit, isNew] = m_Images.Create(image);
if (!isNew)
return commit;
m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
.sampler = sampler->m_Sampler,
.imageView = image->m_View,
.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
});
m_Writes.push_back({
.dstSet = m_DescriptorSet,
.dstBinding = IMAGE_BINDING_INDEX,
.dstArrayElement = commit.m_Index,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.pImageInfo = &m_WriteInfos.back().uImageInfo,
});
return commit;
}
CommitManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo const &info)
: uBufferInfo{info}
{
}
CommitManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo const &info)
: uImageInfo{info}
{
}
CommitManager::WriteInfo::WriteInfo(vk::BufferView const &info)
: uBufferView{info}
{
}
void
CommitManager::Update()
{
// Descriptor Updates
if (!m_Writes.empty())
{
m_Device->m_Device->updateDescriptorSets(static_cast<u32>(m_Writes.size()), m_Writes.data(), 0, nullptr);
m_Writes.clear();
m_WriteInfos.clear();
}
m_Buffers.Update();
m_Images.Update();
}

View File

@ -1,506 +0,0 @@
// =============================================
// Aster: context.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/systems/context.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
constexpr static u32
GetFormatSize(vk::Format const format)
{
switch (format)
{
case vk::Format::eUndefined:
return 0;
case vk::Format::eR8Unorm:
case vk::Format::eR8Snorm:
case vk::Format::eR8Uscaled:
case vk::Format::eR8Sscaled:
case vk::Format::eR8Uint:
case vk::Format::eR8Sint:
case vk::Format::eR8Srgb:
return 1;
case vk::Format::eR8G8Unorm:
case vk::Format::eR8G8Snorm:
case vk::Format::eR8G8Uscaled:
case vk::Format::eR8G8Sscaled:
case vk::Format::eR8G8Uint:
case vk::Format::eR8G8Sint:
case vk::Format::eR8G8Srgb:
return 2;
case vk::Format::eR8G8B8Unorm:
case vk::Format::eR8G8B8Snorm:
case vk::Format::eR8G8B8Uscaled:
case vk::Format::eR8G8B8Sscaled:
case vk::Format::eR8G8B8Uint:
case vk::Format::eR8G8B8Sint:
case vk::Format::eR8G8B8Srgb:
case vk::Format::eB8G8R8Unorm:
case vk::Format::eB8G8R8Snorm:
case vk::Format::eB8G8R8Uscaled:
case vk::Format::eB8G8R8Sscaled:
case vk::Format::eB8G8R8Uint:
case vk::Format::eB8G8R8Sint:
case vk::Format::eB8G8R8Srgb:
return 3;
case vk::Format::eR8G8B8A8Unorm:
case vk::Format::eR8G8B8A8Snorm:
case vk::Format::eR8G8B8A8Uscaled:
case vk::Format::eR8G8B8A8Sscaled:
case vk::Format::eR8G8B8A8Uint:
case vk::Format::eR8G8B8A8Sint:
case vk::Format::eR8G8B8A8Srgb:
case vk::Format::eB8G8R8A8Unorm:
case vk::Format::eB8G8R8A8Snorm:
case vk::Format::eB8G8R8A8Uscaled:
case vk::Format::eB8G8R8A8Sscaled:
case vk::Format::eB8G8R8A8Uint:
case vk::Format::eB8G8R8A8Sint:
case vk::Format::eB8G8R8A8Srgb:
return 4;
case vk::Format::eR16Unorm:
case vk::Format::eR16Snorm:
case vk::Format::eR16Uscaled:
case vk::Format::eR16Sscaled:
case vk::Format::eR16Uint:
case vk::Format::eR16Sint:
case vk::Format::eR16Sfloat:
return 2;
case vk::Format::eR16G16Unorm:
case vk::Format::eR16G16Snorm:
case vk::Format::eR16G16Uscaled:
case vk::Format::eR16G16Sscaled:
case vk::Format::eR16G16Uint:
case vk::Format::eR16G16Sint:
case vk::Format::eR16G16Sfloat:
return 4;
case vk::Format::eR16G16B16Unorm:
case vk::Format::eR16G16B16Snorm:
case vk::Format::eR16G16B16Uscaled:
case vk::Format::eR16G16B16Sscaled:
case vk::Format::eR16G16B16Uint:
case vk::Format::eR16G16B16Sint:
case vk::Format::eR16G16B16Sfloat:
return 6;
case vk::Format::eR16G16B16A16Unorm:
case vk::Format::eR16G16B16A16Snorm:
case vk::Format::eR16G16B16A16Uscaled:
case vk::Format::eR16G16B16A16Sscaled:
case vk::Format::eR16G16B16A16Uint:
case vk::Format::eR16G16B16A16Sint:
case vk::Format::eR16G16B16A16Sfloat:
return 8;
case vk::Format::eR32Uint:
case vk::Format::eR32Sint:
case vk::Format::eR32Sfloat:
return 4;
case vk::Format::eR32G32Uint:
case vk::Format::eR32G32Sint:
case vk::Format::eR32G32Sfloat:
return 8;
case vk::Format::eR32G32B32Uint:
case vk::Format::eR32G32B32Sint:
case vk::Format::eR32G32B32Sfloat:
return 12;
case vk::Format::eR32G32B32A32Uint:
case vk::Format::eR32G32B32A32Sint:
case vk::Format::eR32G32B32A32Sfloat:
return 16;
case vk::Format::eD16Unorm:
return 2;
case vk::Format::eD32Sfloat:
return 4;
case vk::Format::eS8Uint:
return 1;
case vk::Format::eD16UnormS8Uint:
return 6;
case vk::Format::eD24UnormS8Uint:
return 4;
case vk::Format::eD32SfloatS8Uint:
return 5;
default:
TODO("Esoteric Formats");
}
return 0;
}
void
systems::Context::KeepAlive(Ref<Buffer> const &buffer)
{
assert(m_Pool);
m_Pool->KeepAlive(buffer);
}
void
systems::Context::KeepAlive(Ref<Image> const &image)
{
assert(m_Pool);
m_Pool->KeepAlive(image);
}
void
systems::Context::KeepAlive(Ref<ImageView> const &view)
{
assert(m_Pool);
m_Pool->KeepAlive(view);
}
void
systems::Context::Dependency(vk::DependencyInfo const &dependencyInfo)
{
m_Cmd.pipelineBarrier2(&dependencyInfo);
}
void
systems::Context::Begin()
{
vk::CommandBufferBeginInfo commandBufferBeginInfo = {
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
};
auto result = m_Cmd.begin(&commandBufferBeginInfo);
ERROR_IF(Failed(result), "Could not begin context") THEN_ABORT(result);
}
// Release versions inline 'no-op'.
#if !defined(ASTER_NDEBUG)
void
systems::Context::BeginDebugRegion(cstr const name, vec4 const color)
{
vk::DebugUtilsLabelEXT const label = {
.pLabelName = name,
.color = std::array{color.r, color.g, color.b, color.a},
};
m_Cmd.beginDebugUtilsLabelEXT(&label);
}
void
systems::Context::EndDebugRegion()
{
m_Cmd.endDebugUtilsLabelEXT();
}
#endif
void
systems::Context::End()
{
auto result = m_Cmd.end();
ERROR_IF(Failed(result), "Could not end context") THEN_ABORT(result);
}
void
systems::ComputeContext::Dispatch(Pipeline const &pipeline, u32 x, u32 y, u32 z, usize size, void *data)
{
BindPipeline(pipeline);
PushConstantBlock(0, size, data);
m_Cmd.dispatch(x, y, z);
}
void
systems::ComputeContext::BindPipeline(Pipeline const &pipeline)
{
auto bindPoint = vk::PipelineBindPoint::eGraphics;
switch (pipeline.m_Kind)
{
case Pipeline::Kind::eGraphics:
bindPoint = vk::PipelineBindPoint::eGraphics;
break;
case Pipeline::Kind::eCompute:
bindPoint = vk::PipelineBindPoint::eCompute;
break;
default:
UNREACHABLE("No additional bind points");
}
m_Cmd.bindPipeline(bindPoint, pipeline.m_Pipeline);
// TODO: Maybe find a smarter place to host this.
if (CommitManager::IsInit())
{
m_Cmd.bindDescriptorSets(bindPoint, pipeline.m_Layout, 0, 1, &CommitManager::Instance().GetDescriptorSet(), 0,
nullptr);
}
m_PipelineInUse = &pipeline;
}
void
systems::GraphicsContext::SetViewport(vk::Viewport const &viewport)
{
m_Cmd.setViewport(0, 1, &viewport);
}
void
systems::GraphicsContext::BindVertexBuffer(Ref<VertexBuffer> const &vertexBuffer)
{
constexpr vk::DeviceSize offset = 0;
m_Cmd.bindVertexBuffers(0, 1, &vertexBuffer->m_Buffer, &offset);
}
void
systems::GraphicsContext::BindIndexBuffer(Ref<IndexBuffer> const &indexBuffer)
{
m_Cmd.bindIndexBuffer(indexBuffer->m_Buffer, 0, vk::IndexType::eUint32);
}
void
systems::GraphicsContext::Draw(usize const vertexCount)
{
m_Cmd.draw(static_cast<u32>(vertexCount), 1, 0, 0);
}
void
systems::GraphicsContext::DrawIndexed(usize indexCount)
{
m_Cmd.drawIndexed(static_cast<u32>(indexCount), 1, 0, 0, 0);
}
void
systems::GraphicsContext::DrawIndexed(usize const indexCount, usize const firstIndex, usize const firstVertex)
{
m_Cmd.drawIndexed(static_cast<u32>(indexCount), 1, static_cast<u32>(firstIndex), static_cast<i32>(firstVertex), 0);
}
void
systems::GraphicsContext::BeginRendering(vk::RenderingInfo const &renderingInfo)
{
m_Cmd.beginRendering(&renderingInfo);
m_Cmd.setScissor(0, 1, &renderingInfo.renderArea);
}
void
systems::GraphicsContext::EndRendering()
{
m_Cmd.endRendering();
}
void
systems::TransferContext::UploadTexture(Ref<Image> const &image, eastl::span<u8> const &data)
{
ERROR_IF(not(image and image->IsValid()), "Invalid image");
auto [w, h, d] = image->m_Extent;
auto formatSize = GetFormatSize(image->m_Format);
auto expectedByteSize = static_cast<u64>(w) * static_cast<u64>(h) * static_cast<u64>(d) * formatSize;
ERROR_IF(expectedByteSize != data.size_bytes(), "Mismatch in data size {} vs image size {} ({}x{}x{}x{})",
data.size_bytes(), expectedByteSize, w, h, d, formatSize);
Ref<StagingBuffer> const stagingBuffer = m_Pool->GetDevice().CreateStagingBuffer(data.size_bytes());
stagingBuffer->Write(0, data.size_bytes(), data.data());
vk::BufferImageCopy const bufferImageCopy = {
.bufferOffset = 0,
.bufferRowLength = w,
.bufferImageHeight = h,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = image->m_Extent,
};
m_Cmd.copyBufferToImage(stagingBuffer->m_Buffer, image->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
&bufferImageCopy);
KeepAlive(stagingBuffer);
KeepAlive(image);
}
void
systems::TransferContext::UploadBuffer(Ref<Buffer> const &buffer, usize size, void const *data)
{
ERROR_IF(not(buffer and buffer->IsValid()), "Invalid buffer");
auto expectedByteSize = buffer->m_Size;
ERROR_IF(expectedByteSize != size, "Mismatch in data size {} vs buffer size {}", size, expectedByteSize);
Ref<StagingBuffer> const stagingBuffer = m_Pool->GetDevice().CreateStagingBuffer(size);
stagingBuffer->Write(0, size, data);
vk::BufferCopy const bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = expectedByteSize};
m_Cmd.copyBuffer(stagingBuffer->m_Buffer, buffer->m_Buffer, 1, &bufferCopy);
KeepAlive(stagingBuffer);
KeepAlive(buffer);
}
void
systems::TransferContext::Blit(vk::BlitImageInfo2 const &mipBlitInfo)
{
m_Cmd.blitImage2(&mipBlitInfo);
}
systems::TransferContext::TransferContext(TransferContext &&other) noexcept
: Context{std::move(other)}
{
}
systems::TransferContext &
systems::TransferContext::operator=(TransferContext &&other) noexcept
{
if (this == &other)
return *this;
Context::operator=(std::move(other));
return *this;
}
void
systems::ComputeContext::PushConstantBlock(usize const offset, usize const size, void const *data)
{
assert(m_PipelineInUse);
vk::ShaderStageFlags stage;
switch (m_PipelineInUse->m_Kind)
{
case Pipeline::Kind::eGraphics:
stage = vk::ShaderStageFlagBits::eAll;
break;
case Pipeline::Kind::eCompute:
stage = vk::ShaderStageFlagBits::eCompute;
break;
}
m_Cmd.pushConstants(m_PipelineInUse->m_Layout, stage, static_cast<u32>(offset), static_cast<u32>(size), data);
}
using namespace systems::_internal;
ContextPool::ContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
: m_Device{&device}
, m_BuffersAllocated{0}
, m_ExtraData{0}
, m_ManagedBy{managedBy}
, m_ResetCallback{}
{
vk::CommandPoolCreateInfo const commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueFamilyIndex,
};
AbortIfFailed(device.m_Device->createCommandPool(&commandPoolCreateInfo, nullptr, &m_Pool));
}
ContextPool::ContextPool(ContextPool &&other) noexcept
: m_Device{other.m_Device}
, m_Pool{Take(other.m_Pool)}
, m_CommandBuffers{std::move(other.m_CommandBuffers)}
, m_BuffersAllocated{other.m_BuffersAllocated}
, m_ExtraData{other.m_ExtraData}
, m_ManagedBy{other.m_ManagedBy}
, m_OwnedBuffers{std::move(other.m_OwnedBuffers)}
, m_OwnedImages{std::move(other.m_OwnedImages)}
, m_OwnedImageViews{std::move(other.m_OwnedImageViews)}
, m_ResetCallback{std::move(other.m_ResetCallback)}
{
}
ContextPool &
ContextPool::operator=(ContextPool &&other) noexcept
{
if (this == &other)
return *this;
using eastl::swap;
swap(m_Device, other.m_Device);
swap(m_Pool, other.m_Pool);
swap(m_CommandBuffers, other.m_CommandBuffers);
swap(m_ExtraData, other.m_ExtraData);
swap(m_ManagedBy, other.m_ManagedBy);
swap(m_BuffersAllocated, other.m_BuffersAllocated);
swap(m_OwnedBuffers, other.m_OwnedBuffers);
swap(m_OwnedImages, other.m_OwnedImages);
swap(m_OwnedImageViews, other.m_OwnedImageViews);
swap(m_ResetCallback, other.m_ResetCallback);
return *this;
}
ContextPool::~ContextPool()
{
if (!m_Pool)
return;
m_Device->m_Device->destroy(Take(m_Pool), nullptr);
}
void
ContextPool::KeepAlive(Ref<Buffer> const &buffer)
{
m_OwnedBuffers.push_back(buffer);
}
void
ContextPool::KeepAlive(Ref<Image> const &image)
{
m_OwnedImages.push_back(image);
}
void
ContextPool::KeepAlive(Ref<ImageView> const &view)
{
m_OwnedImageViews.push_back(view);
}
vk::CommandBuffer
ContextPool::AllocateCommandBuffer()
{
// Buffers are available.
if (m_BuffersAllocated < m_CommandBuffers.size())
{
return m_CommandBuffers[m_BuffersAllocated++];
}
// Allocate New Buffer.
vk::CommandBufferAllocateInfo const allocateInfo = {
.commandPool = m_Pool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
vk::CommandBuffer &cmd = m_CommandBuffers.emplace_back();
AbortIfFailed(m_Device->m_Device->allocateCommandBuffers(&allocateInfo, &cmd));
++m_BuffersAllocated;
return cmd;
}
systems::Context
ContextPool::CreateContext()
{
return Context{*this, AllocateCommandBuffer()};
}
void
ContextPool::Reset()
{
assert(m_Pool);
AbortIfFailed(m_Device->m_Device->resetCommandPool(m_Pool, {}));
m_BuffersAllocated = 0;
m_OwnedBuffers.clear();
m_OwnedImages.clear();
m_OwnedImageViews.clear();
}
systems::TransferContext
TransferContextPool::CreateTransferContext()
{
return TransferContext{*this, AllocateCommandBuffer()};
}
systems::ComputeContext
ComputeContextPool::CreateComputeContext()
{
return ComputeContext{*this, AllocateCommandBuffer()};
}
systems::GraphicsContext
GraphicsContextPool::CreateGraphicsContext()
{
return GraphicsContext{*this, AllocateCommandBuffer()};
}

View File

@ -0,0 +1,316 @@
// =============================================
// Aster: buffer_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/image_manager.h"
#include "core/device.h"
Manager<Image> *Manager<Image>::m_Instance = nullptr;
using namespace systems;
vk::ImageCreateInfo ToImageCreateInfo(const Texture2DCreateInfo &createInfo);
vk::ImageCreateInfo ToImageCreateInfo(const TextureCubeCreateInfo &createInfo);
vk::ImageCreateInfo ToImageCreateInfo(const AttachmentCreateInfo &createInfo);
vk::ImageCreateInfo ToImageCreateInfo(const DepthStencilImageCreateInfo &createInfo);
namespace usage_flags
{
constexpr vk::ImageUsageFlags MIPMAP = vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
constexpr vk::ImageUsageFlags SAMPLE = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
constexpr vk::ImageUsageFlags STORAGE =
vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eTransferSrc;
constexpr vk::ImageUsageFlags COLOR_ATTACHMENT =
vk::ImageUsageFlagBits::eColorAttachment | vk::ImageUsageFlagBits::eTransferSrc;
constexpr vk::ImageUsageFlags DEPTH_STENCIL_ATTACHMENT = vk::ImageUsageFlagBits::eDepthStencilAttachment;
} // namespace usage_flags
ImageHandle
ImageManager::CreateTexture2D(const Texture2DCreateInfo &createInfo)
{
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
vk::ImageCreateInfo imageCreateInfo = ToImageCreateInfo(createInfo);
auto result = Cast<vk::Result>(vmaCreateImage(m_Device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", createInfo.m_Name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::e2D,
.format = imageCreateInfo.format,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = imageCreateInfo.mipLevels,
.baseArrayLayer = 0,
.layerCount = imageCreateInfo.arrayLayers,
},
};
result = m_Device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", createInfo.m_Name, result)
THEN_ABORT(result);
auto [handle, object] = Alloc();
object->m_Image = image;
object->m_View = view;
object->m_Allocation = allocation;
object->m_Extent = imageCreateInfo.extent;
object->m_Flags_ = Image::OWNED_BIT | Image::VALID_BIT;
object->m_LayerCount = Cast<u8>(imageCreateInfo.arrayLayers);
object->m_MipLevels = Cast<u8>(imageCreateInfo.mipLevels);
m_Device->SetName(object->m_Image, createInfo.m_Name);
return handle;
}
ImageHandle
ImageManager::CreateTextureCube(const TextureCubeCreateInfo &createInfo)
{
constexpr VmaAllocationCreateInfo allocationCreateInfo = {
.flags = {},
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
vk::ImageCreateInfo imageCreateInfo = ToImageCreateInfo(createInfo);
auto result = Cast<vk::Result>(vmaCreateImage(m_Device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
&allocationCreateInfo, &image, &allocation, nullptr));
ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", createInfo.m_Name, result) THEN_ABORT(result);
vk::ImageView view;
const vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = image,
.viewType = vk::ImageViewType::eCube,
.format = imageCreateInfo.format,
.components = {},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = imageCreateInfo.mipLevels,
.baseArrayLayer = 0,
.layerCount = imageCreateInfo.arrayLayers,
},
};
result = m_Device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", createInfo.m_Name, result)
THEN_ABORT(result);
auto [handle, object] = Alloc();
object->m_Image = image;
object->m_View = view;
object->m_Allocation = allocation;
object->m_Extent = imageCreateInfo.extent;
object->m_Flags_ = Image::OWNED_BIT | Image::VALID_BIT;
object->m_LayerCount = Cast<u8>(imageCreateInfo.arrayLayers);
object->m_MipLevels = Cast<u8>(imageCreateInfo.mipLevels);
m_Device->SetName(object->m_Image, createInfo.m_Name);
return handle;
}
ImageHandle
// Creates a single-mip, single-layer 2D color-attachment image and its view,
// backed by a dedicated VMA allocation, and registers it in the handle pool.
// Aborts on allocation or view-creation failure.
ImageManager::CreateAttachment(const AttachmentCreateInfo &createInfo)
{
    // Attachments get their own memory block; they are large and long-lived.
    constexpr VmaAllocationCreateInfo allocationCreateInfo = {
        .flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
        .usage = VMA_MEMORY_USAGE_AUTO,
    };
    VkImage image;
    VmaAllocation allocation;
    vk::ImageCreateInfo imageCreateInfo = ToImageCreateInfo(createInfo);
    auto result = Cast<vk::Result>(vmaCreateImage(m_Device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
                                                  &allocationCreateInfo, &image, &allocation, nullptr));
    ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", createInfo.m_Name, result) THEN_ABORT(result);
    vk::ImageView view;
    // View covers the full subresource range (all mips/layers of the image).
    const vk::ImageViewCreateInfo imageViewCreateInfo = {
        .image = image,
        .viewType = vk::ImageViewType::e2D,
        .format = imageCreateInfo.format,
        .components = {},
        .subresourceRange =
            {
                .aspectMask = vk::ImageAspectFlagBits::eColor,
                .baseMipLevel = 0,
                .levelCount = imageCreateInfo.mipLevels,
                .baseArrayLayer = 0,
                .layerCount = imageCreateInfo.arrayLayers,
            },
    };
    result = m_Device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
    ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", createInfo.m_Name, result)
    THEN_ABORT(result);
    // Store everything needed for later binding/destruction on the pooled object.
    auto [handle, object] = Alloc();
    object->m_Image = image;
    object->m_View = view;
    object->m_Allocation = allocation;
    object->m_Extent = imageCreateInfo.extent;
    object->m_Flags_ = Image::OWNED_BIT | Image::VALID_BIT;
    object->m_LayerCount = Cast<u8>(imageCreateInfo.arrayLayers);
    object->m_MipLevels = Cast<u8>(imageCreateInfo.mipLevels);
    m_Device->SetName(object->m_Image, createInfo.m_Name);
    return handle;
}
ImageHandle
// Creates a depth-stencil image (format fixed by ToImageCreateInfo) and a view
// covering both depth and stencil aspects. Mirrors CreateAttachment otherwise.
ImageManager::CreateDepthStencilImage(const DepthStencilImageCreateInfo &createInfo)
{
    // Dedicated allocation, as for color attachments.
    constexpr VmaAllocationCreateInfo allocationCreateInfo = {
        .flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
        .usage = VMA_MEMORY_USAGE_AUTO,
    };
    VkImage image;
    VmaAllocation allocation;
    vk::ImageCreateInfo imageCreateInfo = ToImageCreateInfo(createInfo);
    auto result = Cast<vk::Result>(vmaCreateImage(m_Device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
                                                  &allocationCreateInfo, &image, &allocation, nullptr));
    ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", createInfo.m_Name, result) THEN_ABORT(result);
    vk::ImageView view;
    const vk::ImageViewCreateInfo imageViewCreateInfo = {
        .image = image,
        .viewType = vk::ImageViewType::e2D,
        .format = imageCreateInfo.format,
        .components = {},
        .subresourceRange =
            {
                // Both aspects: the format carries depth and stencil data.
                .aspectMask = vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil,
                .baseMipLevel = 0,
                .levelCount = imageCreateInfo.mipLevels,
                .baseArrayLayer = 0,
                .layerCount = imageCreateInfo.arrayLayers,
            },
    };
    result = m_Device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
    ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", createInfo.m_Name, result)
    THEN_ABORT(result);
    auto [handle, object] = Alloc();
    object->m_Image = image;
    object->m_View = view;
    object->m_Allocation = allocation;
    object->m_Extent = imageCreateInfo.extent;
    object->m_Flags_ = Image::OWNED_BIT | Image::VALID_BIT;
    object->m_LayerCount = Cast<u8>(imageCreateInfo.arrayLayers);
    object->m_MipLevels = Cast<u8>(imageCreateInfo.mipLevels);
    m_Device->SetName(object->m_Image, createInfo.m_Name);
    return handle;
}
vk::ImageCreateInfo
// Builds the vk::ImageCreateInfo for a sampled/storage 2D texture.
// Warns on non-power-of-two dimensions; computes a full mip chain when
// mipmapping is requested.
ToImageCreateInfo(const Texture2DCreateInfo &createInfo)
{
    auto &[format, extent, name, isSampled, isMipMapped, isStorage] = createInfo;
    // BUGFIX: the second operand previously re-tested extent.width, so a
    // non-power-of-two height was never reported.
    WARN_IF(!IsPowerOfTwo(extent.width) || !IsPowerOfTwo(extent.height), "Image {2} is {0}x{1} (Non Power of Two)",
            extent.width, extent.height, name ? name : "<unnamed>");
    // Full chain down to 1x1: floor(log2(max(w, h))) + 1 levels.
    const u8 mipLevels = isMipMapped ? 1 + Cast<u8>(floor(log2(eastl::max(extent.width, extent.height)))) : 1;
    auto usage = vk::ImageUsageFlags{};
    if (isSampled)
        usage |= usage_flags::SAMPLE;
    if (isMipMapped)
        usage |= usage_flags::MIPMAP;
    if (isStorage)
        usage |= usage_flags::STORAGE;
    return {
        .imageType = vk::ImageType::e2D,
        .format = format,
        .extent = ToExtent3D(extent, 1),
        .mipLevels = mipLevels,
        .arrayLayers = 1,
        .usage = usage,
    };
}
vk::ImageCreateInfo
// Builds the vk::ImageCreateInfo for a cube texture: a cube-compatible 2D
// image with six array layers and square faces of size `side`.
ToImageCreateInfo(const TextureCubeCreateInfo &createInfo)
{
    auto &[format, side, name, isSampled, isMipMapped, isStorage] = createInfo;
    WARN_IF(!IsPowerOfTwo(side), "ImageCube {1} is {0}x{0} (Non Power of Two)", side, name ? name : "<unnamed>");
    // Full mip chain down to 1x1 when requested, otherwise a single level.
    u8 const levelCount = isMipMapped ? Cast<u8>(floor(log2(side))) + 1 : 1;
    vk::ImageUsageFlags usageFlags{};
    if (isSampled)
        usageFlags |= usage_flags::SAMPLE;
    if (isMipMapped)
        usageFlags |= usage_flags::MIPMAP;
    if (isStorage)
        usageFlags |= usage_flags::STORAGE;
    return {
        .flags = vk::ImageCreateFlagBits::eCubeCompatible,
        .imageType = vk::ImageType::e2D,
        .format = format,
        .extent = {side, side, 1},
        .mipLevels = levelCount,
        .arrayLayers = 6,
        .usage = usageFlags,
    };
}
vk::ImageCreateInfo
// Builds the vk::ImageCreateInfo for a color attachment: a plain 2D image
// with one mip level and one array layer.
ToImageCreateInfo(const AttachmentCreateInfo &createInfo)
{
    auto &[format, extent, name] = createInfo;
    return {
        .imageType = vk::ImageType::e2D,
        .format = format,
        .extent = ToExtent3D(extent, 1),
        .mipLevels = 1,
        .arrayLayers = 1,
        .usage = usage_flags::COLOR_ATTACHMENT,
    };
}
vk::ImageCreateInfo
// Builds the vk::ImageCreateInfo for a depth-stencil target. The format is
// fixed to D24-unorm + S8-uint; one mip level, one array layer.
ToImageCreateInfo(const DepthStencilImageCreateInfo &createInfo)
{
    auto &[extent, name] = createInfo;
    return {
        .imageType = vk::ImageType::e2D,
        .format = vk::Format::eD24UnormS8Uint,
        .extent = ToExtent3D(extent, 1),
        .mipLevels = 1,
        .arrayLayers = 1,
        .usage = usage_flags::DEPTH_STENCIL_ATTACHMENT,
    };
}
// Forwards straight to the pooled-resource Manager base; no extra state.
ImageManager::ImageManager(const Device *device, const u32 maxCount, const u8 binding)
    : Manager{device, maxCount, binding}
{
}

View File

@ -0,0 +1,6 @@
// =============================================
// Aster: manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/manager.h"

View File

@ -1,374 +0,0 @@
// =============================================
// Aster: pipeline_helpers.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/rendering_device.h"
#include <aster/systems/pipeline_helpers.h>
using namespace systems::_internal;
// std::visit visitor: maps each PipelineCreationError alternative to a
// human-readable message string.
struct WhatVisitor
{
    std::string
    operator()(std::monostate) const
    {
        return std::string{"No Error"};
    }
    std::string
    operator()(vk::Result const result) const
    {
        return fmt::format("Vulkan Error: {}", result);
    }
    std::string
    operator()(SlangResult const result) const
    {
        return fmt::format("Slang Error: {}", result);
    }
};
// std::visit visitor: collapses each PipelineCreationError alternative to a
// raw i32 error code (0 when there is no error).
struct ValueVisitor
{
    i32
    operator()(std::monostate) const
    {
        return 0;
    }
    i32
    operator()(vk::Result const result) const
    {
        return static_cast<i32>(result);
    }
    i32
    operator()(SlangResult const result) const
    {
        return result;
    }
};
i32
// Returns the numeric error code of whichever alternative is held (0 = none).
systems::PipelineCreationError::Value()
{
    return std::visit(ValueVisitor{}, m_Data);
}
// Wraps a Vulkan error result.
systems::PipelineCreationError::PipelineCreationError(vk::Result res)
    : m_Data{res}
{
}
// Wraps a Slang compiler error result.
systems::PipelineCreationError::PipelineCreationError(SlangResult res)
    : m_Data{res}
{
}
// Default-constructs the "no error" state (monostate).
systems::PipelineCreationError::PipelineCreationError()
    : m_Data{std::monostate{}}
{
}
// True when an actual error (Vulkan or Slang) is held.
systems::PipelineCreationError::operator bool() const
{
    return !std::holds_alternative<std::monostate>(m_Data);
}
std::string
// Formats the held error as a human-readable message.
systems::PipelineCreationError::What()
{
    return std::visit(WhatVisitor{}, m_Data);
}
vk::ShaderStageFlagBits
// One-to-one mapping from a Slang shader stage to the corresponding Vulkan
// stage flag. SLANG_STAGE_NONE/COUNT are sentinel values and unreachable.
systems::SlangToVulkanShaderStage(SlangStage const stage)
{
    switch (stage)
    {
    case SLANG_STAGE_VERTEX:
        return vk::ShaderStageFlagBits::eVertex;
    case SLANG_STAGE_HULL:
        return vk::ShaderStageFlagBits::eTessellationControl;
    case SLANG_STAGE_DOMAIN:
        return vk::ShaderStageFlagBits::eTessellationEvaluation;
    case SLANG_STAGE_GEOMETRY:
        return vk::ShaderStageFlagBits::eGeometry;
    case SLANG_STAGE_FRAGMENT:
        return vk::ShaderStageFlagBits::eFragment;
    case SLANG_STAGE_COMPUTE:
        return vk::ShaderStageFlagBits::eCompute;
    case SLANG_STAGE_RAY_GENERATION:
        return vk::ShaderStageFlagBits::eRaygenKHR;
    case SLANG_STAGE_INTERSECTION:
        return vk::ShaderStageFlagBits::eIntersectionKHR;
    case SLANG_STAGE_ANY_HIT:
        return vk::ShaderStageFlagBits::eAnyHitKHR;
    case SLANG_STAGE_CLOSEST_HIT:
        return vk::ShaderStageFlagBits::eClosestHitKHR;
    case SLANG_STAGE_MISS:
        return vk::ShaderStageFlagBits::eMissKHR;
    case SLANG_STAGE_CALLABLE:
        return vk::ShaderStageFlagBits::eCallableKHR;
    case SLANG_STAGE_MESH:
        return vk::ShaderStageFlagBits::eMeshEXT;
    case SLANG_STAGE_AMPLIFICATION:
        return vk::ShaderStageFlagBits::eTaskEXT;
    case SLANG_STAGE_NONE:
    case SLANG_STAGE_COUNT:
        UNREACHABLE();
        return {};
    }
    // Defensive: non-enumerated integral values.
    UNREACHABLE();
    return {};
}
// Seeds set 0 with the bindless layout; a null handle is filtered out later
// during Build().
PipelineLayoutBuilder::PipelineLayoutBuilder(RenderingDevice *device, vk::DescriptorSetLayout bindlessLayout)
    : m_Device{device}
    , m_DescriptorSetLayouts{bindlessLayout} // if `null` will be filtered out during build.
{
}
vk::PipelineLayout
// Drops unused (null) set-layout slots, then bakes the pipeline layout from
// the surviving descriptor set layouts plus the collected push-constant
// ranges. Aborts on failure (OoM only).
PipelineLayoutBuilder::Build()
{
    eastl::vector<vk::DescriptorSetLayout> activeLayouts;
    activeLayouts.reserve(m_DescriptorSetLayouts.size());
    for (auto const &layout : m_DescriptorSetLayouts)
    {
        if (layout)
            activeLayouts.push_back(layout);
    }
    vk::PipelineLayoutCreateInfo const createInfo = {
        .setLayoutCount = static_cast<u32>(activeLayouts.size()),
        .pSetLayouts = activeLayouts.data(),
        .pushConstantRangeCount = static_cast<u32>(m_PushConstants.size()),
        .pPushConstantRanges = m_PushConstants.data(),
    };
    vk::PipelineLayout pipelineLayout;
    AbortIfFailed(m_Device->m_Device->createPipelineLayout(&createInfo, nullptr, &pipelineLayout));
    return pipelineLayout;
}
vk::DescriptorSetLayout
// Thin wrapper: creates a descriptor set layout on the builder's device.
PipelineLayoutBuilder::CreateDescriptorSetLayout(vk::DescriptorSetLayoutCreateInfo const &createInfo) const
{
    vk::DescriptorSetLayout dsl;
    // Failure Cases are OoM errors. No recovery.
    AbortIfFailed(m_Device->m_Device->createDescriptorSetLayout(&createInfo, nullptr, &dsl));
    return dsl;
}
void
// Allocates one descriptor set for a reflected ParameterBlock: collects its
// element's ranges into a DescriptorLayoutBuilder and builds the set layout.
PipelineLayoutBuilder::AddDescriptorSetForParameterBlock(slang::TypeLayoutReflection *layout)
{
    DescriptorLayoutBuilder descriptorLayoutBuilder{this};
    descriptorLayoutBuilder.AddRangesForParamBlockElement(layout->getElementTypeLayout());
    descriptorLayoutBuilder.Build();
}
void
// Records a push-constant range for a reflected ConstantBuffer, tagged with
// the stage currently being processed. Zero-sized buffers contribute nothing.
PipelineLayoutBuilder::AddPushConstantRangeForConstantBuffer(slang::TypeLayoutReflection *layout)
{
    auto const elementSize = layout->getElementTypeLayout()->getSize();
    if (elementSize == 0)
        return;
    m_PushConstants.push_back({
        .stageFlags = m_Stage,
        .offset = 0,
        .size = static_cast<u32>(elementSize),
    });
}
void
// Dispatches one reflected sub-object range: ParameterBlocks become whole
// descriptor sets, PushConstant buffers become push-constant ranges. Any
// other binding type is a reflection contract violation.
PipelineLayoutBuilder::AddSubObjectRange(slang::TypeLayoutReflection *layout, i64 subObjectRangeIndex)
{
    auto bindingRangeIndex = layout->getSubObjectRangeBindingRangeIndex(subObjectRangeIndex);
    switch (layout->getBindingRangeType(bindingRangeIndex))
    {
    case slang::BindingType::ParameterBlock: {
        auto const parameterBlockTypeLayout = layout->getBindingRangeLeafTypeLayout(bindingRangeIndex);
        AddDescriptorSetForParameterBlock(parameterBlockTypeLayout);
    }
    break;
    case slang::BindingType::PushConstant: {
        auto const constantBufferTypeLayout = layout->getBindingRangeLeafTypeLayout(bindingRangeIndex);
        AddPushConstantRangeForConstantBuffer(constantBufferTypeLayout);
    }
    break;
    default:
        UNREACHABLE("Unexpected types");
    }
}
vk::DescriptorType
// Maps a Slang binding type to the Vulkan descriptor type used for it.
// Note that typed/raw/mutable buffers all collapse to eStorageBuffer here.
BindingTypeToDescriptorType(slang::BindingType binding)
{
    using vk::DescriptorType;
    switch (binding)
    {
    case slang::BindingType::Sampler:
        return DescriptorType::eSampler;
    case slang::BindingType::Texture:
        return DescriptorType::eSampledImage;
    case slang::BindingType::ConstantBuffer:
        return DescriptorType::eUniformBuffer;
    case slang::BindingType::TypedBuffer:
        return DescriptorType::eStorageBuffer;
    case slang::BindingType::RawBuffer:
        return DescriptorType::eStorageBuffer;
    case slang::BindingType::CombinedTextureSampler:
        return DescriptorType::eCombinedImageSampler;
    case slang::BindingType::InlineUniformData:
        return DescriptorType::eInlineUniformBlock;
    case slang::BindingType::RayTracingAccelerationStructure:
        return DescriptorType::eAccelerationStructureKHR;
    case slang::BindingType::MutableTexture:
        return DescriptorType::eStorageImage;
    case slang::BindingType::MutableTypedBuffer:
        return DescriptorType::eStorageBuffer;
    case slang::BindingType::MutableRawBuffer:
        return DescriptorType::eStorageBuffer;
    default:
        UNREACHABLE("Unsupported Types");
    }
    // Unreached; keeps non-void return paths satisfied.
    return {};
}
vk::ShaderStageFlags &
// Accessor for the parent builder's "current stage" — the stage flags stamped
// onto every binding recorded while it is set.
DescriptorLayoutBuilder::Stage() const
{
    return m_PipelineLayoutBuilder->m_Stage;
}
// Reserves the next set index in the parent builder by pushing a placeholder
// (default-constructed, i.e. null) layout slot; Build() fills it in later.
DescriptorLayoutBuilder::DescriptorLayoutBuilder(PipelineLayoutBuilder *pipelineLayoutBuilder)
    : m_PipelineLayoutBuilder{pipelineLayoutBuilder}
    , m_SetIndex{static_cast<u32>(pipelineLayoutBuilder->m_DescriptorSetLayouts.size())}
{
    m_PipelineLayoutBuilder->m_DescriptorSetLayouts.push_back();
}
void
// Records one reflected descriptor range as a layout binding. Push constants
// are skipped here — they are handled via sub-object ranges. The Vulkan
// binding index is simply the running count of recorded bindings.
DescriptorLayoutBuilder::AddDescriptorRange(slang::TypeLayoutReflection *layout, i64 const relativeSetIndex,
                                            i64 const rangeIndex)
{
    auto const bindingType = layout->getDescriptorSetDescriptorRangeType(relativeSetIndex, rangeIndex);
    if (bindingType == slang::BindingType::PushConstant)
        return;
    u32 const descriptorCount =
        static_cast<u32>(layout->getDescriptorSetDescriptorRangeDescriptorCount(relativeSetIndex, rangeIndex));
    u32 const bindingIndex = static_cast<u32>(m_LayoutBindings.size());
    auto const vkBindingType = BindingTypeToDescriptorType(bindingType);
    m_LayoutBindings.push_back({
        .binding = bindingIndex,
        .descriptorType = vkBindingType,
        .descriptorCount = descriptorCount,
        .stageFlags = Stage(),
    });
}
void
// Walks every descriptor range of every (relative) descriptor set in the
// reflected layout and records a binding for each.
DescriptorLayoutBuilder::AddDescriptorRanges(slang::TypeLayoutReflection *layout)
{
    i64 const setCount = layout->getDescriptorSetCount();
    for (i64 setIndex = 0; setIndex < setCount; ++setIndex)
    {
        i64 const rangesInSet = layout->getDescriptorSetDescriptorRangeCount(setIndex);
        for (i64 range = 0; range < rangesInSet; ++range)
        {
            AddDescriptorRange(layout, setIndex, range);
        }
    }
}
void
// Creates the descriptor set layout from the collected bindings and installs
// it into the slot reserved by the constructor. An empty set stays null and
// is later filtered out by PipelineLayoutBuilder::Build().
DescriptorLayoutBuilder::Build()
{
    if (m_LayoutBindings.empty())
        return;
    auto const dsl = m_PipelineLayoutBuilder->CreateDescriptorSetLayout({
        .bindingCount = static_cast<u32>(m_LayoutBindings.size()),
        .pBindings = m_LayoutBindings.data(),
    });
    m_PipelineLayoutBuilder->m_DescriptorSetLayouts[m_SetIndex] = dsl;
}
void
// Adds the implicit uniform buffer Slang introduces when a parameter block's
// element carries ordinary (non-resource) data; it occupies the next binding.
DescriptorLayoutBuilder::AddAutomaticallyIntroducedUniformBuffer()
{
    auto const vulkanBindingIndex = static_cast<u32>(m_LayoutBindings.size());
    m_LayoutBindings.push_back({
        .binding = vulkanBindingIndex,
        .descriptorType = vk::DescriptorType::eUniformBuffer,
        .descriptorCount = 1,
        .stageFlags = vk::ShaderStageFlagBits::eAll,
    });
}
void
// Records this layout's own descriptor ranges, then recurses into its
// sub-objects (nested parameter blocks / push constants) via the parent.
DescriptorLayoutBuilder::AddRanges(slang::TypeLayoutReflection *layout)
{
    AddDescriptorRanges(layout);
    m_PipelineLayoutBuilder->AddSubObjectRanges(layout);
}
void
// Entry point for a parameter block's element type: adds the implicit uniform
// buffer first when the element has ordinary data, then the regular ranges.
DescriptorLayoutBuilder::AddRangesForParamBlockElement(slang::TypeLayoutReflection *layout)
{
    if (layout->getSize() > 0)
    {
        AddAutomaticallyIntroducedUniformBuffer();
    }
    AddRanges(layout);
}
void
// Global-scope parameters are visible to every stage, so stamp eAll before
// collecting the global parameter layout.
DescriptorLayoutBuilder::AddGlobalScopeParameters(slang::ProgramLayout *layout)
{
    Stage() = vk::ShaderStageFlagBits::eAll;
    AddRangesForParamBlockElement(layout->getGlobalParamsTypeLayout());
}
void
// Visits every entry point in the program and collects its parameters.
DescriptorLayoutBuilder::AddEntryPointParameters(slang::ProgramLayout *layout)
{
    u64 const count = layout->getEntryPointCount();
    for (u64 index = 0; index < count; ++index)
    {
        AddEntryPointParameters(layout->getEntryPointByIndex(index));
    }
}
void
// Entry-point parameters belong to exactly one stage; set it before
// collecting the entry point's parameter layout.
DescriptorLayoutBuilder::AddEntryPointParameters(slang::EntryPointLayout *layout)
{
    Stage() = SlangToVulkanShaderStage(layout->getStage());
    AddRangesForParamBlockElement(layout->getTypeLayout());
}
void
// Iterates all sub-object ranges of a reflected layout and dispatches each to
// AddSubObjectRange (parameter block or push-constant handling).
PipelineLayoutBuilder::AddSubObjectRanges(slang::TypeLayoutReflection *layout)
{
    i64 subObjectRangeCount = layout->getSubObjectRangeCount();
    for (i64 subObjectRangeIndex = 0; subObjectRangeIndex < subObjectRangeCount; ++subObjectRangeIndex)
    {
        AddSubObjectRange(layout, subObjectRangeIndex);
    }
}

View File

@ -0,0 +1,195 @@
// =============================================
// Aster: render_resource_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "systems/render_resource_manager.h"

#include <cstring>

#include "EASTL/array.h"
#include "core/device.h"
// Evaluate RESULT once; on failure, log the vk::Result and abort with it.
#define AbortIfFailed(RESULT) \
    do \
    { \
        vk::Result _checkResultValue_; \
        ERROR_IF(Failed(_checkResultValue_ = Cast<vk::Result>(RESULT)), "Cause: {}", _checkResultValue_) \
        THEN_ABORT(_checkResultValue_); \
    } while (false)
// As AbortIfFailed, with a caller message MSG that consumes one extra
// format argument EXTRA before the result.
#define AbortIfFailedMV(RESULT, MSG, EXTRA) \
    do \
    { \
        vk::Result _checkResultValue_; \
        ERROR_IF(Failed(_checkResultValue_ = Cast<vk::Result>(RESULT)), MSG " Cause: {}", EXTRA, _checkResultValue_) \
        THEN_ABORT(_checkResultValue_); \
    } while (false)
// As AbortIfFailed, with a caller-supplied message and no extra argument.
#define AbortIfFailedM(RESULT, MSG) \
    do \
    { \
        auto _checkResultValue_ = Cast<vk::Result>(RESULT); \
        ERROR_IF(Failed(_checkResultValue_), MSG " Cause: {}", _checkResultValue_) THEN_ABORT(_checkResultValue_); \
    } while (false)
using namespace systems;
u32
// Extracts the raw 32-bit value stored at the start of a handle.
// BUGFIX: the previous Recast<u32 *> dereference was a strict-aliasing
// violation (UB); memcpy reads the same bytes with identical codegen.
// Assumes the handle's first 4 bytes hold the index, exactly as the
// original cast did — TODO confirm against the Handle<> definition.
GetHandleInternal(concepts::HandleType auto &handle)
{
    u32 raw;
    std::memcpy(&raw, &handle, sizeof raw);
    return raw;
}
// Tags the pending write's union as owning a buffer handle.
RenderResourceManager::WriteOwner::WriteOwner(const Handle<Buffer> &handle)
    : uBufferHandle(handle)
{
}
// Tags the pending write's union as owning an image handle.
RenderResourceManager::WriteOwner::WriteOwner(const Handle<Image> &handle)
    : uImageHandle(handle)
{
}
// Sets up the bindless descriptor machinery: one update-after-bind pool, one
// set layout with partially-bound arrays for buffers and sampled images, and
// the single global descriptor set allocated from it.
RenderResourceManager::RenderResourceManager(const Device *device, u32 const maxBuffers, const u32 maxImages)
    : m_BufferManager{device, maxBuffers, BUFFER_BINDING_INDEX}
    , m_ImageManager{device, maxImages, IMAGE_BINDING_INDEX}
{
    // Pool sized for the full bindless arrays; storage images are not yet enabled.
    eastl::array poolSizes = {
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eStorageBuffer,
            .descriptorCount = maxBuffers,
        },
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eCombinedImageSampler,
            .descriptorCount = maxImages,
        },
        //vk::DescriptorPoolSize{
        //    .type = vk::DescriptorType::eStorageImage,
        //    .descriptorCount = storageTexturesCount,
        //},
    };
    const vk::DescriptorPoolCreateInfo poolCreateInfo = {
        .flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
        .maxSets = 1,
        .poolSizeCount = Cast<u32>(poolSizes.size()),
        .pPoolSizes = poolSizes.data(),
    };
    AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));
    // Binding indices must line up with the Manager binding constants above.
    eastl::array descriptorLayoutBindings = {
        vk::DescriptorSetLayoutBinding{
            .binding = BUFFER_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eStorageBuffer,
            .descriptorCount = Cast<u32>(maxBuffers),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
        vk::DescriptorSetLayoutBinding{
            .binding = IMAGE_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eCombinedImageSampler,
            .descriptorCount = Cast<u32>(maxImages),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
        //vk::DescriptorSetLayoutBinding{
        //    .binding = STORAGE_TEXTURE_BINDING_INDEX,
        //    .descriptorType = vk::DescriptorType::eStorageImage,
        //    .descriptorCount = Cast<u32>(storageTexturesCount),
        //    .stageFlags = vk::ShaderStageFlagBits::eAll,
        //},
    };
    // Every binding is partially bound (sparse arrays) and update-after-bind.
    vk::DescriptorBindingFlags bindingFlags =
        vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
    eastl::array<vk::DescriptorBindingFlags, decltype(descriptorLayoutBindings)::count> layoutBindingFlags;
    layoutBindingFlags.fill(bindingFlags);
    vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
        .bindingCount = Cast<u32>(layoutBindingFlags.size()),
        .pBindingFlags = layoutBindingFlags.data(),
    };
    static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
    const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
        .pNext = &bindingFlagsCreateInfo,
        .flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
        .bindingCount = Cast<u32>(descriptorLayoutBindings.size()),
        .pBindings = descriptorLayoutBindings.data(),
    };
    AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));
    // One descriptor is enough. Updating it at any time is safe. (Update until submit, data held when pending)
    // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
    // https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
    const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
        .descriptorPool = m_DescriptorPool,
        .descriptorSetCount = 1,
        .pSetLayouts = &m_SetLayout,
    };
    AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));
    device->SetName(m_SetLayout, "Bindless Layout");
    device->SetName(m_DescriptorPool, "Bindless Pool");
    device->SetName(m_DescriptorSet, "Bindless Set");
}
void
// Queues a bindless descriptor write for a buffer or image handle; the write
// lands in m_Writes/m_WriteInfos to be flushed later via vkUpdateDescriptorSets.
// NOTE(review): pBufferInfo/pImageInfo point into m_WriteInfos; if that vector
// reallocates on a later Commit before the flush, these pointers dangle —
// presumably the flush re-binds them or capacity is reserved; TODO confirm.
systems::RenderResourceManager::Commit(concepts::HandleType auto &handle)
{
    using HandleType = decltype(handle)::Type;
    if constexpr (std::is_same_v<HandleType, Buffer>)
    {
        const Buffer *buffer = handle.Fetch();
        m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
            .buffer = buffer->m_Buffer,
            .offset = 0,
            .range = buffer->GetSize(),
        });
        // Array element = handle index: handle N owns bindless slot N.
        m_Writes.push_back({
            .dstSet = m_DescriptorSet,
            .dstBinding = BUFFER_BINDING_INDEX,
            .dstArrayElement = handle.GetIndex(),
            .descriptorCount = 1,
            .descriptorType = vk::DescriptorType::eStorageBuffer,
            .pBufferInfo = &m_WriteInfos.back().uBufferInfo,
        });
    }
    else if constexpr (std::is_same_v<HandleType, Image>)
    {
        const Image *image = handle.Fetch();
        m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
            .sampler = nullptr /* TODO Sampler */,
            .imageView = image->m_View,
            .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
        });
        m_Writes.push_back({
            .dstSet = m_DescriptorSet,
            .dstBinding = IMAGE_BINDING_INDEX,
            .dstArrayElement = handle.GetIndex(),
            .descriptorCount = 1,
            .descriptorType = vk::DescriptorType::eSampledImage,
            .pImageInfo = &m_WriteInfos.back().uImageInfo,
        });
    } else {
        // NOTE(review): a discarded static_assert(false) is only well-formed
        // from C++23 (P2593); on C++20 toolchains this may fail to compile
        // even when never instantiated — confirm against the project standard.
        static_assert(false && "Type is currently unsupported");
    }
    // Keep the handle alive until the queued write is flushed.
    m_WriteOwner.emplace_back(handle);
}
// Union member initializer: buffer descriptor payload.
RenderResourceManager::WriteInfo::WriteInfo(const vk::DescriptorBufferInfo &info)
    : uBufferInfo{info}
{
}
// Union member initializer: image descriptor payload.
RenderResourceManager::WriteInfo::WriteInfo(const vk::DescriptorImageInfo &info)
    : uImageInfo{info}
{
}
// Union member initializer: texel-buffer-view payload.
RenderResourceManager::WriteInfo::WriteInfo(const vk::BufferView &info)
    : uBufferView{info}
{
}

File diff suppressed because it is too large Load Diff

View File

@ -1,150 +0,0 @@
// =============================================
// Aster: sync_server.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/systems/sync_server.h"
#include "aster/systems/rendering_device.h"
using namespace systems::_internal;
// Creates a timeline semaphore starting at 0; the sync point begins as
// {wait = 0, next = 1}, so the first signal target is 1.
SyncServer::Entry::Entry(RenderingDevice &device)
    : m_CurrentPoint{0, 1}
    , m_AttachedPool{nullptr}
{
    constexpr static vk::SemaphoreTypeCreateInfo TYPE_CREATE_INFO = {
        .semaphoreType = vk::SemaphoreType::eTimeline,
        .initialValue = 0,
    };
    constexpr static vk::SemaphoreCreateInfo SEMAPHORE_CREATE_INFO = {.pNext = &TYPE_CREATE_INFO};
    AbortIfFailed(device.m_Device->createSemaphore(&SEMAPHORE_CREATE_INFO, nullptr, &m_Semaphore));
}
void
// Releases the timeline semaphore; Take() nulls the member, so calling
// Destroy again is a harmless no-op.
SyncServer::Entry::Destroy(RenderingDevice &device)
{
    if (!m_Semaphore)
        return;
    device.m_Device->destroy(Take(m_Semaphore), nullptr);
}
void
// Blocks until the timeline semaphore reaches this entry's next value, then
// advances the sync point and resets any command pool attached to it.
SyncServer::Entry::Wait(RenderingDevice &device)
{
    vk::SemaphoreWaitInfo const waitInfo = {
        .semaphoreCount = 1,
        .pSemaphores = &m_Semaphore,
        .pValues = &m_CurrentPoint.m_NextValue,
    };
    // This blocks.
    // So `m_NextValue` is not modified while we wait for the signal.
    AbortIfFailed(device.m_Device->waitSemaphores(&waitInfo, MaxValue<u64>));
    // Thus, this is safe.
    // BUGFIX: the second statement previously re-assigned m_WaitValue
    // (leaving m_NextValue stale, so every later Wait targeted the same
    // timeline value); it must advance m_NextValue, matching Next().
    m_CurrentPoint.m_WaitValue = m_CurrentPoint.m_NextValue;
    m_CurrentPoint.m_NextValue = m_CurrentPoint.m_NextValue + 1;
    if (m_AttachedPool)
    {
        m_AttachedPool->Reset();
        m_AttachedPool = nullptr;
    }
}
void
// Advances the sync point without waiting: the old target becomes the wait
// value and the next target is incremented.
SyncServer::Entry::Next()
{
    m_CurrentPoint.m_WaitValue = m_CurrentPoint.m_NextValue;
    ++m_CurrentPoint.m_NextValue;
}
void
// Associates a command/context pool with this entry; the pool is reset once
// the entry's wait completes. Only one pool may be attached at a time.
SyncServer::Entry::AttachPool(ContextPool *pool)
{
    assert(!m_AttachedPool);
    m_AttachedPool = pool;
}
systems::Receipt
// Hands out a receipt wrapping a fresh (or recycled) sync entry.
SyncServer::Allocate()
{
    return Receipt{&AllocateEntry()};
}
void
// Returns a receipt's entry to the free list without waiting on it.
SyncServer::Free(Receipt const receipt)
{
    FreeEntry(GetEntry(receipt));
}
void
// Blocks until the receipt's entry signals, then recycles the entry.
SyncServer::WaitOn(Receipt const receipt)
{
    auto &entry = GetEntry(receipt);
    entry.Wait(*m_Device);
    FreeEntry(entry);
}
SyncServer::Entry &
// Reuses a recycled entry when one is available, otherwise creates a new
// semaphore-backed entry in m_Allocations (which must keep addresses stable,
// since receipts store entry pointers).
SyncServer::AllocateEntry()
{
    if (not m_FreeList.empty())
    {
        // BUGFIX: previously bound a reference to back() and then popped,
        // reading the element after removal (dangling). Copy the element —
        // a (presumed) reference wrapper to the pooled Entry — out first.
        auto alloc = m_FreeList.back();
        m_FreeList.pop_back();
        return alloc;
    }
    return m_Allocations.emplace_back(*m_Device);
}
void
// Advances the entry's sync point (so stale receipts can never match) and
// places it on the free list for reuse.
SyncServer::FreeEntry(Entry &entry)
{
    entry.Next();
    m_FreeList.push_back(entry);
}
SyncServer::Entry &
// Recovers the Entry from the receipt's opaque pointer (set in Allocate).
SyncServer::GetEntry(Receipt receipt)
{
    return *static_cast<Entry *>(receipt.m_Opaque);
}
// Stores the owning device; entries are created lazily on demand.
SyncServer::SyncServer(RenderingDevice &device)
    : m_Device{&device}
{
}
// Destroys every allocated entry's semaphore. A moved-from server has a null
// m_Device and is skipped.
SyncServer::~SyncServer()
{
    if (m_Device && !m_Allocations.empty())
    {
        for (auto &entry : m_Allocations)
        {
            entry.Destroy(*m_Device);
        }
        m_Device = nullptr;
    }
}
// Move: steals the device pointer (nulling the source so its destructor is a
// no-op) plus the entry and free-list storage.
SyncServer::SyncServer(SyncServer &&other) noexcept
    : m_Device{Take(other.m_Device)}
    , m_Allocations{std::move(other.m_Allocations)}
    , m_FreeList{Take(other.m_FreeList)}
{
}
SyncServer &
// Move assignment with self-assignment guard; source is left device-less.
SyncServer::operator=(SyncServer &&other) noexcept
{
    if (this == &other)
        return *this;
    m_Device = Take(other.m_Device);
    m_Allocations = std::move(other.m_Allocations);
    m_FreeList = Take(other.m_FreeList);
    return *this;
}

View File

@ -2,4 +2,4 @@
cmake_minimum_required(VERSION 3.13)
target_sources(aster_core PRIVATE "logger.cpp" "files.cpp")
target_sources(aster_core PRIVATE "logger.cpp")

View File

@ -1,79 +0,0 @@
// =============================================
// Aster: files.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "aster/util/files.h"
eastl::vector<u32>
// Reads an entire binary file as 32-bit words (e.g. SPIR-V). Aborts if the
// file cannot be opened; a trailing partial word is silently dropped by
// fread's element-wise read.
// NOTE(review): fileName.data() is not guaranteed NUL-terminated for an
// arbitrary string_view — callers appear to pass literals; confirm.
ReadFile(std::string_view fileName)
{
    FILE *filePtr = fopen(fileName.data(), "rb");
    if (!filePtr)
    {
        ERROR("Invalid read of {}", fileName) THEN_ABORT(-1);
    }
    eastl::vector<u32> outputVec;
    eastl::array<u32, 1024> buffer{};
    usize totalRead = 0;
    usize readCount;
    do
    {
        readCount = fread(buffer.data(), sizeof(u32), buffer.size(), filePtr);
        auto const nextSize = totalRead + readCount;
        outputVec.resize(nextSize);
        memcpy(outputVec.data() + totalRead, buffer.data(), readCount * sizeof *buffer.data());
        totalRead = nextSize;
    } while (readCount == buffer.size());
    // BUGFIX: the handle was previously leaked on every call; close it, as
    // the other file helpers in this translation unit do.
    (void)fclose(filePtr);
    return outputVec;
}
eastl::vector<u8>
// Reads a whole file into a byte vector. On open failure, logs (only when
// errorOnFail is set) and returns an empty vector.
ReadFileBytes(std::string_view fileName, bool errorOnFail)
{
    FILE *stream = fopen(fileName.data(), "rb");
    if (!stream)
    {
        ERROR_IF(errorOnFail, "Invalid open (r) of {}. Cause: {}", fileName, errno);
        return {};
    }
    eastl::vector<u8> bytes;
    eastl::array<u8, 4096> chunk{};
    usize byteCount = 0;
    usize got;
    // Append fixed-size chunks until a short read signals EOF (or error).
    do
    {
        got = fread(chunk.data(), sizeof(u8), chunk.size(), stream);
        auto const grownSize = byteCount + got;
        bytes.resize(grownSize);
        memcpy(bytes.data() + byteCount, chunk.data(), got * sizeof *chunk.data());
        byteCount = grownSize;
    } while (got == chunk.size());
    (void)fclose(stream);
    return bytes;
}
bool
WriteFileBytes(std::string_view fileName, eastl::span<u8> const data)
{
FILE *filePtr = fopen(fileName.data(), "wb");
if (!filePtr)
{
ERROR("Invalid open (w) of {}. Cause: {}", fileName, errno);
return false;
}
usize const written = fwrite(data.data(), sizeof(u8), data.size(), filePtr);
(void)fclose(filePtr);
return written == data.size();
}

View File

@ -1,18 +1,18 @@
// =============================================
// Aster: logger.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "util/logger.h"
auto g_Logger = Logger();
Logger g_Logger = Logger();
// ReSharper disable once CppInconsistentNaming
/* Credits to Const-me */
namespace eastl
{
void
AssertionFailure(char const *af)
AssertionFailure(const char *af)
{
ERROR("{}", af);
}

31
build.sh Executable file
View File

@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Configure and build Aster. Flags (matched anywhere in "$@"):
#   clean   - remove build outputs only
#   rebuild - clean first, then build
#   docs    - run doxygen after the build (add -v for verbose output)
echo "Running CMake"
# Pick the CMake preset: NixOS needs its own toolchain preset.
# BUGFIX: -q added — without it grep echoed the matched os-release line
# into the build log on NixOS.
if grep -q 'NAME=NixOS' /etc/os-release
then
    cmake --preset nixos
else
    cmake --preset linux
fi
echo "Running Ninja"
if echo "$@" | grep -e "clean" -q
then
    cmake --build build --target clean
elif echo "$@" | grep -e "rebuild" -q
then
    cmake --build build --clean-first
else
    cmake --build build
fi
if echo "$@" | grep -e "docs" -q
then
    if echo "$@" | grep -e "-v" -q
    then
        doxygen
    else
        doxygen > /dev/null || echo "Doxygen Failed"
    fi
fi

View File

@ -20,16 +20,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1742976680,
"narHash": "sha256-Lcyi6YyR0PgN5rOrmM6mM/1MJIYhGi6rrq0+eiqvUb4=",
"owner": "kidrigger",
"lastModified": 1738734093,
"narHash": "sha256-UEYOKfXXKU49fR7dGB05As0s2pGbLK4xDo48Qtdm7xs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "51cf54bdbd9c1a0a2f833cced82451df0d9c25bd",
"rev": "5b2753b0356d1c951d7a3ef1d086ba5a71fff43c",
"type": "github"
},
"original": {
"owner": "kidrigger",
"ref": "imgui-docking",
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}

View File

@ -1,6 +1,6 @@
{
inputs = {
nixpkgs.url = "github:kidrigger/nixpkgs/imgui-docking";
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = {self, nixpkgs, flake-utils }:
@ -17,7 +17,7 @@
with pkgs;
{
devShells.default = clangStdenv.mkDerivation {
name = "Aster-Env";
name = "BlazeEnv";
nativeBuildInputs = [
@ -26,10 +26,11 @@
ccls
clang-tools
lldb
(imgui.override {IMGUI_BUILD_VULKAN_BINDING = true; IMGUI_BUILD_GLFW_BINDING=true; IMGUI_EXPERIMENTAL_DOCKING = true; })
(imgui.override {IMGUI_BUILD_VULKAN_BINDING = true; IMGUI_BUILD_GLFW_BINDING=true; })
];
buildInputs = [
sdl3
glm
glfw3
eastl
@ -49,7 +50,6 @@
directx-shader-compiler
glslang
shaderc
shader-slang
];
};
}

14
run.sh Executable file
View File

@ -0,0 +1,14 @@
#!/usr/bin/env bash
# Launch the scene_render sample from the build tree.
# Pass "debug" anywhere in the arguments to run it under lldb instead.
if [ -d "build" ]; then
    pushd ./build/samples/04_scenes/ > /dev/null || exit
    if echo "$@" | grep -e "debug" -q
    then
        lldb ./scene_render
    else
        ./scene_render
    fi
    popd > /dev/null || exit
else
    echo "Build Aster first."
fi

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: frame.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "frame.h"
@ -17,7 +17,7 @@ Frame::Frame(const Device *device, const u32 queueFamilyIndex, const u32 frameCo
m_Device = device;
eastl::fixed_string<char, 50, false> name = "Frame ";
name += static_cast<char>('0' + frameCount);
name += Cast<char>('0' + frameCount);
const vk::CommandPoolCreateInfo commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueFamilyIndex,
@ -71,7 +71,7 @@ Frame::Present(const vk::Queue commandQueue, Swapchain *swapchain, const Surface
case vk::Result::eErrorOutOfDateKHR:
case vk::Result::eSuboptimalKHR:
DEBUG("Recreating Swapchain. Cause: {}", result);
swapchain->Create(*surface, size);
swapchain->Create(surface, size);
break; // Present failed. We do nothing. Frame is skipped.
default:
AbortIfFailedM(result, "Swapchain Present failed.");
@ -154,7 +154,7 @@ FrameManager::GetNextFrame(Swapchain *swapchain, const Surface *surface, Size2D
break; // Image acquired. Break out of loop.
case vk::Result::eErrorOutOfDateKHR:
DEBUG("Recreating Swapchain. Cause: {}", result);
swapchain->Create(*surface, size);
swapchain->Create(surface, size);
break; // Image acquire has failed. We move to the next frame.
default:
AbortIfFailedMV(result, "Waiting for swapchain image {} failed.", frameIndex);

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: frame.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -31,7 +31,7 @@ struct Frame
// Transient
u32 m_ImageIdx;
void Present(vk::Queue commandQueue, Swapchain *swapchain, const Surface *surface, Size2D size);
void Present(const vk::Queue commandQueue, Swapchain* swapchain, const Surface* surface, Size2D size);
Frame(const Device *device, u32 queueFamilyIndex, u32 frameCount);
~Frame();

View File

@ -1,14 +1,13 @@
// =============================================
// Aster: gui.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "gui.h"
#include "aster/core/context.h"
#include "aster/core/device.h"
#include "aster/core/instance.h"
#include "aster/core/window.h"
#include "aster/systems/rendering_device.h"
#include "helpers.h"
#include <imgui_impl_glfw.h>
@ -27,74 +26,8 @@ VulkanAssert(VkResult result)
}
void
Init(systems::RenderingDevice &device, Window &window)
{
g_AttachmentFormat = device.m_Swapchain.m_Format;
eastl::vector<vk::DescriptorPoolSize> poolSizes = {
{vk::DescriptorType::eSampler, 1000},
{vk::DescriptorType::eCombinedImageSampler, 1000},
{vk::DescriptorType::eSampledImage, 1000},
{vk::DescriptorType::eStorageImage, 1000},
{vk::DescriptorType::eUniformTexelBuffer, 1000},
{vk::DescriptorType::eStorageTexelBuffer, 1000},
{vk::DescriptorType::eUniformBuffer, 1000},
{vk::DescriptorType::eStorageBuffer, 1000},
{vk::DescriptorType::eUniformBufferDynamic, 1000},
{vk::DescriptorType::eStorageBufferDynamic, 1000},
{vk::DescriptorType::eInputAttachment, 1000},
};
vk::DescriptorPoolCreateInfo const descriptorPoolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet,
.maxSets = 1000,
.poolSizeCount = static_cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
AbortIfFailed(device.m_Device->createDescriptorPool(&descriptorPoolCreateInfo, nullptr, &g_DescriptorPool));
IMGUI_CHECKVERSION();
CreateContext();
ImGuiIO &io = GetIO();
(void)io;
// io.ConfigFlags |= ImGuiConfigFlags_DockingEnable;
// io.ConfigFlags |= ImGuiConfigFlags_ViewportsEnable; // Viewports bad
StyleColorsDark();
ImGui_ImplGlfw_InitForVulkan(window.m_Window, true);
vk::PipelineRenderingCreateInfo renderingCreateInfo = {
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &g_AttachmentFormat,
};
// TODO: Switch this into being managed by RenderingDevice.
// m_Instance etc should private.
ImGui_ImplVulkan_InitInfo imguiVulkanInitInfo = {
.Instance = device.m_Instance.m_Instance,
.PhysicalDevice = device.m_Device.m_PhysicalDevice,
.Device = device.m_Device.m_Device,
.QueueFamily = device.m_PrimaryQueueFamily,
.Queue = device.m_PrimaryQueue,
.DescriptorPool = g_DescriptorPool,
.MinImageCount = static_cast<u32>(device.m_Swapchain.m_Images.size()),
.ImageCount = static_cast<u32>(device.m_Swapchain.m_Images.size()),
.PipelineCache = nullptr,
.UseDynamicRendering = true,
.PipelineRenderingCreateInfo = renderingCreateInfo,
.Allocator = nullptr,
.CheckVkResultFn = VulkanAssert,
};
ImGui_ImplVulkan_Init(&imguiVulkanInitInfo);
ImGui_ImplVulkan_CreateFontsTexture();
}
void
Init(Instance const *context, Device const *device, Window const *window, vk::Format attachmentFormat,
u32 const imageCount, u32 const queueFamily, vk::Queue const queue)
Init(const Context *context, const Device *device, const Window *window, vk::Format attachmentFormat,
const u32 imageCount, const u32 queueFamily, const vk::Queue queue)
{
g_AttachmentFormat = attachmentFormat;
@ -112,10 +45,10 @@ Init(Instance const *context, Device const *device, Window const *window, vk::Fo
{vk::DescriptorType::eInputAttachment, 1000},
};
vk::DescriptorPoolCreateInfo const descriptorPoolCreateInfo = {
const vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo = {
.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet,
.maxSets = 1000,
.poolSizeCount = static_cast<u32>(poolSizes.size()),
.poolSizeCount = Cast<u32>(poolSizes.size()),
.pPoolSizes = poolSizes.data(),
};
@ -158,18 +91,9 @@ Init(Instance const *context, Device const *device, Window const *window, vk::Fo
}
void
Destroy(systems::RenderingDevice const &device)
Destroy(const Device *device)
{
ImGui_ImplVulkan_Shutdown();
ImGui_ImplGlfw_Shutdown();
DestroyContext();
device.m_Device->destroy(Take(g_DescriptorPool), nullptr);
}
void
Destroy(Device const *device)
{
ImGui_ImplVulkan_Shutdown();
ImGui_ImplGlfw_Shutdown();
DestroyContext();
@ -190,7 +114,7 @@ StartBuild()
// because it would be confusing to have two docking targets within each others.
ImGuiWindowFlags windowFlags = ImGuiWindowFlags_None | ImGuiWindowFlags_NoDocking;
ImGuiViewport const *viewport = GetMainViewport();
const ImGuiViewport *viewport = GetMainViewport();
SetNextWindowPos(viewport->WorkPos);
SetNextWindowSize(viewport->WorkSize);
// SetNextWindowViewport(viewport->ID);
@ -215,7 +139,7 @@ StartBuild()
// DockSpace
if (GetIO().ConfigFlags & ImGuiConfigFlags_DockingEnable)
{
ImGuiID const dockspaceId = GetID("MyDockSpace");
const ImGuiID dockspaceId = GetID("MyDockSpace");
DockSpace(dockspaceId, ImVec2(0.0f, 0.0f), dockspaceFlags);
}
}
@ -237,7 +161,7 @@ EndBuild()
}
void
Draw(vk::CommandBuffer const commandBuffer, vk::Extent2D const extent, vk::ImageView const view)
Draw(const vk::CommandBuffer commandBuffer, const vk::Extent2D extent, const vk::ImageView view)
{
// OPTICK_EVENT();
@ -257,7 +181,7 @@ Draw(vk::CommandBuffer const commandBuffer, vk::Extent2D const extent, vk::Image
.clearValue = vk::ClearColorValue{0.0f, 0.0f, 0.0f, 1.0f},
};
vk::RenderingInfo const renderingInfo = {
const vk::RenderingInfo renderingInfo = {
.renderArea = {.extent = extent},
.layerCount = 1,
.colorAttachmentCount = 1,
@ -276,36 +200,6 @@ Draw(vk::CommandBuffer const commandBuffer, vk::Extent2D const extent, vk::Image
#endif
}
void
Draw(systems::Frame &frame, systems::GraphicsContext &context)
{
context.BeginDebugRegion("UI Pass", {0.9f, 0.9f, 1.0f, 1.0f});
vk::RenderingAttachmentInfo attachmentInfo = {
.imageView = frame.m_SwapchainImageView,
.imageLayout = vk::ImageLayout::eColorAttachmentOptimal,
.resolveMode = vk::ResolveModeFlagBits::eNone,
.loadOp = vk::AttachmentLoadOp::eLoad,
.storeOp = vk::AttachmentStoreOp::eStore,
.clearValue = vk::ClearColorValue{0.0f, 0.0f, 0.0f, 1.0f},
};
vk::RenderingInfo const renderingInfo = {
.renderArea = {.extent = frame.m_SwapchainSize},
.layerCount = 1,
.colorAttachmentCount = 1,
.pColorAttachments = &attachmentInfo,
.pDepthAttachment = nullptr,
};
context.BeginRendering(renderingInfo);
ImGui_ImplVulkan_RenderDrawData(GetDrawData(), context.GetCommandBuffer());
context.EndRendering();
context.EndDebugRegion();
}
void
PushDisable()
{

View File

@ -1,41 +1,31 @@
// =============================================
// Aster: gui.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/device.h"
#include <imgui.h>
struct AttachmentImage;
struct Device;
struct Instance;
struct Context;
struct Window;
struct Swapchain;
namespace systems
{
class RenderingDevice;
class GraphicsContext;
struct Frame;
}
// ReSharper disable once CppInconsistentNaming
namespace ImGui
{
void Init(systems::RenderingDevice &device, Window &window);
void Init(const Instance *context, const Device *device, const Window *window, vk::Format attachmentFormat,
void Init(const Context *context, const Device *device, const Window *window, vk::Format attachmentFormat,
u32 imageCount, u32 queueFamily, vk::Queue queue);
void Destroy(const systems::RenderingDevice &device);
void Destroy(const Device *device);
void Recreate();
void StartBuild();
void EndBuild();
void Draw(vk::CommandBuffer commandBuffer, vk::Extent2D extent, vk::ImageView view);
void Draw(systems::Frame &frame, systems::GraphicsContext &context);
void PushDisable();
void PopDisable();

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: helpers.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "helpers.h"
@ -15,24 +15,24 @@ constexpr QueueSupportFlags REQUIRED_QUEUE_SUPPORT = QueueSupportFlags{} | Queue
QueueSupportFlagBits::eTransfer;
bool
IsSuitableDevice(PhysicalDevice const *physicalDevice)
IsSuitableDevice(const PhysicalDevice *physicalDevice)
{
bool const hasAllRequiredQueues =
std::ranges::any_of(physicalDevice->m_QueueFamilies, [](auto const &queueFamilyProp) {
const bool hasAllRequiredQueues =
std::ranges::any_of(physicalDevice->m_QueueFamilies, [](const auto &queueFamilyProp) {
return (queueFamilyProp.m_Support & REQUIRED_QUEUE_SUPPORT) == REQUIRED_QUEUE_SUPPORT;
});
bool const isNotCpu = physicalDevice->m_DeviceProperties.deviceType != vk::PhysicalDeviceType::eCpu;
const bool isNotCpu = physicalDevice->m_DeviceProperties.deviceType != vk::PhysicalDeviceType::eCpu;
bool const hasPresentMode = !physicalDevice->m_PresentModes.empty();
const bool hasPresentMode = !physicalDevice->m_PresentModes.empty();
bool const hasSurfaceFormat = !physicalDevice->m_SurfaceFormats.empty();
const bool hasSurfaceFormat = !physicalDevice->m_SurfaceFormats.empty();
return hasSurfaceFormat && hasPresentMode && isNotCpu && hasAllRequiredQueues;
}
PhysicalDevice
FindSuitableDevice(PhysicalDevices const &physicalDevices)
FindSuitableDevice(const PhysicalDevices &physicalDevices)
{
for (auto &physicalDevice : physicalDevices)
{
@ -47,7 +47,7 @@ FindSuitableDevice(PhysicalDevices const &physicalDevices)
}
QueueAllocation
FindAppropriateQueueAllocation(PhysicalDevice const *physicalDevice)
FindAppropriateQueueAllocation(const PhysicalDevice *physicalDevice)
{
for (auto &queueFamilyInfo : physicalDevice->m_QueueFamilies)
{
@ -62,3 +62,76 @@ FindAppropriateQueueAllocation(PhysicalDevice const *physicalDevice)
ERROR("No suitable queue family on the GPU.")
THEN_ABORT(vk::Result::eErrorUnknown);
}
eastl::vector<u32>
ReadFile(cstr fileName)
{
FILE *filePtr = fopen(fileName, "rb");
if (!filePtr)
{
ERROR("Invalid read of {}", fileName) THEN_ABORT(-1);
}
eastl::vector<u32> outputVec;
eastl::array<u32, 1024> buffer{};
usize totalRead = 0;
usize readCount;
do
{
readCount = fread(buffer.data(), sizeof(u32), buffer.size(), filePtr);
const auto nextSize = totalRead + readCount;
outputVec.resize(nextSize);
memcpy(outputVec.data() + totalRead, buffer.data(), readCount * sizeof *buffer.data());
totalRead = nextSize;
} while (readCount == buffer.size());
return outputVec;
}
eastl::vector<u8>
ReadFileBytes(cstr fileName, bool errorOnFail)
{
FILE *filePtr = fopen(fileName, "rb");
if (!filePtr)
{
ERROR_IF(errorOnFail, "Invalid open (r) of {}. Cause: {}", fileName, errno);
return {};
}
eastl::vector<u8> outputVec;
eastl::array<u8, 4096> buffer{};
usize totalRead = 0;
usize readCount;
do
{
readCount = fread(buffer.data(), sizeof(u8), buffer.size(), filePtr);
const auto nextSize = totalRead + readCount;
outputVec.resize(nextSize);
memcpy(outputVec.data() + totalRead, buffer.data(), readCount * sizeof *buffer.data());
totalRead = nextSize;
} while (readCount == buffer.size());
(void)fclose(filePtr);
return outputVec;
}
bool
WriteFileBytes(cstr fileName, eastl::span<u8> data)
{
FILE *filePtr = fopen(fileName, "wb");
if (!filePtr)
{
ERROR("Invalid open (w) of {}. Cause: {}", fileName, errno);
return false;
}
const usize written = fwrite(data.data(), sizeof(u8), data.size(), filePtr);
(void)fclose(filePtr);
return written == data.size();
}

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: helpers.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -18,6 +18,9 @@ class PhysicalDevices;
PhysicalDevice FindSuitableDevice(const PhysicalDevices &physicalDevices);
QueueAllocation FindAppropriateQueueAllocation(const PhysicalDevice *physicalDevice);
eastl::vector<u32> ReadFile(cstr fileName);
eastl::vector<u8> ReadFileBytes(cstr fileName, bool errorOnFail = true);
bool WriteFileBytes(cstr fileName, eastl::span<u8> data);
template <usize TSize>
using StackString = eastl::fixed_string<char, TSize, false>;
@ -26,7 +29,7 @@ using StackString = eastl::fixed_string<char, TSize, false>;
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = static_cast<vk::Result>(RESULT)), "Cause: {}", _checkResultValue_) \
ERROR_IF(Failed(_checkResultValue_ = Cast<vk::Result>(RESULT)), "Cause: {}", _checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
@ -34,13 +37,13 @@ using StackString = eastl::fixed_string<char, TSize, false>;
do \
{ \
vk::Result _checkResultValue_; \
ERROR_IF(Failed(_checkResultValue_ = static_cast<vk::Result>(RESULT)), MSG " Cause: {}", EXTRA, _checkResultValue_) \
ERROR_IF(Failed(_checkResultValue_ = Cast<vk::Result>(RESULT)), MSG " Cause: {}", EXTRA, _checkResultValue_) \
THEN_ABORT(_checkResultValue_); \
} while (false)
#define AbortIfFailedM(RESULT, MSG) \
do \
{ \
auto _checkResultValue_ = static_cast<vk::Result>(RESULT); \
auto _checkResultValue_ = Cast<vk::Result>(RESULT); \
ERROR_IF(Failed(_checkResultValue_), MSG " Cause: {}", _checkResultValue_) THEN_ABORT(_checkResultValue_); \
} while (false)

View File

@ -3,8 +3,8 @@
cmake_minimum_required(VERSION 3.13)
add_executable(triangle "triangle.cpp")
add_shader(triangle "shader/triangle.slang")
add_resource_dir(triangle "shader")
add_shader(triangle "shader/triangle.vert.glsl")
add_shader(triangle "shader/triangle.frag.glsl")
target_link_libraries(triangle PRIVATE aster_core)
target_link_libraries(triangle PRIVATE util_helper)

View File

@ -0,0 +1,9 @@
#version 450
#pragma shader_stage(fragment)
layout (location = 0) in vec3 inColor;
layout (location = 0) out vec4 outColor;
void main() {
outColor = vec4(inColor, 1.0);
}

View File

@ -1,35 +0,0 @@
struct Vertex {
float3 point;
float3 color;
};
struct VSIn {
Vertex vertex;
};
struct VSOut
{
float4 Pos : SV_POSITION;
float3 Color : COLOR0;
};
[shader("vertex")]
VSOut vsmain(VSIn input) {
VSOut output;
output.Pos = float4(input.vertex.point, 1.0f);
output.Color = input.vertex.color;
return output;
}
struct FSOut {
float4 Color;
};
[shader("fragment")]
FSOut fsmain(VSOut input) {
FSOut outp;
outp.Color = float4(input.Color, 1.0);
return outp;
}

View File

@ -0,0 +1,27 @@
#version 450
#pragma shader_stage(vertex)
layout(location=0) in vec4 position;
layout(location=1) in vec4 color;
layout(location=0) out vec3 outColor;
void main() {
/*
vec3 points[] = {
vec3(-0.5f, -0.5f, 0.0f),
vec3(0.5f, -0.5f, 0.0f),
vec3(0.0f, 0.5f, 0.0f)
};
vec3 colors[] = {
vec3( 1.0f, 0.0f, 0.0f ),
vec3( 0.0f, 1.0f, 0.0f ),
vec3( 0.0f, 0.0f, 1.0f ),
};
gl_Position = vec4(points[gl_VertexIndex], 1.0f);
outColor = vec3(colors[gl_VertexIndex]); //*/
//*
gl_Position = vec4(position.xyz, 1.0f);
outColor = vec3(color.rgb); //*/
}

View File

@ -0,0 +1,28 @@
struct VSIn {
int idx : SV_VERTEXID;
};
struct VSOut
{
float4 Pos : SV_POSITION;
[[vk::location(0)]] float3 Color : COLOR0;
};
VSOut main(VSIn input) {
float3 points[] = {
float3(-0.5f, -0.5f, 0.0f),
float3(0.5f, -0.5f, 0.0f),
float3(0.0f, 0.5f, 0.0f)
};
float3 colors[] = {
float3( 1.0f, 0.0f, 0.0f ),
float3( 0.0f, 1.0f, 0.0f ),
float3( 0.0f, 0.0f, 1.0f ),
};
VSOut output;
output.Pos = float4(points[input.idx], 1.0f);
output.Color = colors[input.idx];
return output;
}

View File

@ -1,78 +1,115 @@
// =============================================
// Aster: triangle.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "aster/aster.h"
#include "aster/core/buffer.h"
#include "aster/core/constants.h"
#include "aster/core/instance.h"
#include "aster/core/context.h"
#include "aster/core/device.h"
#include "aster/core/physical_device.h"
#include "aster/core/pipeline.h"
#include "aster/core/swapchain.h"
#include "aster/core/window.h"
#include "aster/core/pipeline.h"
#include "aster/systems/rendering_device.h"
#include "aster/util/files.h"
#include "helpers.h"
#include <EASTL/array.h>
constexpr auto SHADER_MODULE = "triangle.slang";
constexpr u32 MAX_FRAMES_IN_FLIGHT = 3;
constexpr auto VERTEX_SHADER_FILE = "shader/triangle.vert.glsl.spv";
constexpr auto FRAGMENT_SHADER_FILE = "shader/triangle.frag.glsl.spv";
vk::ShaderModule CreateShader(const Device *device, cstr shaderFile);
Pipeline CreatePipeline(const Device *device, const Swapchain *swapchain);
struct Vertex
{
vec3 m_Position;
vec3 m_Color;
static eastl::vector<systems::AttributeInfo>
GetAttributes()
constexpr static vk::VertexInputBindingDescription
GetBinding(const u32 binding)
{
return {.binding = binding, .stride = sizeof(Vertex), .inputRate = vk::VertexInputRate::eVertex};
}
constexpr static eastl::array<vk::VertexInputAttributeDescription, 2>
GetAttributes(const u32 binding)
{
return {
{
.m_Location = 0,
.m_Offset = offsetof(Vertex, m_Position),
.m_Format = systems::AttributeInfo::Format::eFloat32X3,
vk::VertexInputAttributeDescription{
.location = 0,
.binding = binding,
.format = vk::Format::eR32G32B32Sfloat,
.offset = offsetof(Vertex, m_Position),
},
{
.m_Location = 1,
.m_Offset = offsetof(Vertex, m_Color),
.m_Format = systems::AttributeInfo::Format::eFloat32X3,
vk::VertexInputAttributeDescription{
.location = 1,
.binding = binding,
.format = vk::Format::eR32G32B32Sfloat,
.offset = offsetof(Vertex, m_Color),
},
};
}
};
struct Frame
{
const Device *m_Device;
vk::CommandPool m_Pool;
vk::CommandBuffer m_CommandBuffer;
vk::Fence m_FrameAvailableFence;
vk::Semaphore m_ImageAcquireSem;
vk::Semaphore m_RenderFinishSem;
Frame(const Device *device, u32 queueFamilyIndex, u32 frameCount);
~Frame();
};
int
main(int, char **)
{
MIN_LOG_LEVEL(Logger::LogType::eInfo);
Window window = {"Triangle (Aster)", {640, 480}};
systems::RenderingDevice device{{
.m_Window = window,
.m_Features = {.m_Vulkan12Features = {.bufferDeviceAddress = true},
.m_Vulkan13Features = {.synchronization2 = true, .dynamicRendering = true}},
.m_AppName = "Triangle",
.m_ShaderSearchPaths = {"shader/"},
.m_UseBindless = false,
.m_Name = "Primary",
}};
Context context = {"Triangle", VERSION};
Surface surface = {&context, &window, "Primary"};
Pipeline pipeline;
auto pipelineError = device.CreateGraphicsPipeline(pipeline, {
.m_VertexInputs = {{
.m_Attribute = Vertex::GetAttributes(),
.m_Stride = sizeof(Vertex),
}},
.m_Shaders = {{
.m_ShaderFile = SHADER_MODULE,
.m_EntryPoints = {"vsmain", "fsmain"},
}},
});
ERROR_IF(pipelineError, "Error creating pipeline. Cause: {}", pipelineError.What());
PhysicalDevices physicalDevices = {&surface, &context};
PhysicalDevice deviceToUse = FindSuitableDevice(physicalDevices);
INFO("Using {} as the primary device.", deviceToUse.m_DeviceProperties.deviceName.data());
Features enabledDeviceFeatures = {
.m_Vulkan12Features = {.bufferDeviceAddress = true},
.m_Vulkan13Features = {.synchronization2 = true, .dynamicRendering = true},
};
QueueAllocation queueAllocation = FindAppropriateQueueAllocation(&deviceToUse);
Device device = {&context, &deviceToUse, &enabledDeviceFeatures, {queueAllocation}, "Primary Device"};
vk::Queue commandQueue = device.GetQueue(queueAllocation.m_Family, 0);
Swapchain swapchain = {&surface, &device, window.GetSize(), "Primary Chain"};
Pipeline pipeline = CreatePipeline(&device, &swapchain);
vk::CommandPool copyPool;
vk::CommandBuffer copyBuffer;
{
vk::CommandPoolCreateInfo poolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueAllocation.m_Family,
};
auto result = device.m_Device.createCommandPool(&poolCreateInfo, nullptr, &copyPool);
ERROR_IF(Failed(result), "Copy command pool creation failed. Cause: {}", result) THEN_ABORT(result);
vk::CommandBufferAllocateInfo bufferAllocateInfo = {
.commandPool = copyPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
result = device.m_Device.allocateCommandBuffers(&bufferAllocateInfo, &copyBuffer);
ERROR_IF(Failed(result), "Copy command buffer allocation failed. Cause: {}", result) THEN_ABORT(result);
}
// eastl::array<Vertex, 3> vertices{};
eastl::array vertices = {
@ -80,10 +117,60 @@ main(int, char **)
Vertex{.m_Position = {0.5f, -0.5f, 0.0f}, .m_Color = {0.0f, 1.0f, 0.0f}},
Vertex{.m_Position = {0.0f, 0.5f, 0.0f}, .m_Color = {0.0f, 0.0f, 1.0f}},
};
auto vbo = device.CreateVertexBuffer(vertices.size() * sizeof vertices[0], "VBO");
vbo->Write(0, vertices.size() * sizeof vertices[0], vertices.data());
VertexBuffer vbo;
vbo.Init(&device, vertices.size() * sizeof vertices[0], "VBO");
{
StagingBuffer staging;
staging.Init(&device, vertices.size() * sizeof vertices[0], "Staging");
staging.Write(&device, 0, vertices.size() * sizeof vertices[0], vertices.data());
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
auto result = device.m_Device.createFence(&fenceCreateInfo, nullptr, &fence);
ERROR_IF(Failed(result), "Fence creation failed. Cause: {}", result) THEN_ABORT(result);
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
result = copyBuffer.begin(&beginInfo);
ERROR_IF(Failed(result), "Copy begin failed. Cause: {}", result) THEN_ABORT(result);
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = staging.GetSize()};
copyBuffer.copyBuffer(staging.m_Buffer, vbo.m_Buffer, 1, &bufferCopy);
result = copyBuffer.end();
ERROR_IF(Failed(result), "Copy end failed. Cause: {}", result) THEN_ABORT(result);
vk::SubmitInfo submitInfo = {
.commandBufferCount = 1,
.pCommandBuffers = &copyBuffer,
};
result = commandQueue.submit(1, &submitInfo, fence);
ERROR_IF(Failed(result), "Submit failed. Cause: {}", result) THEN_ABORT(result) ELSE_INFO("Submit copy");
result = device.m_Device.waitForFences(1, &fence, true, MaxValue<u64>);
ERROR_IF(Failed(result), "Fence wait failed. Cause: {}", result) THEN_ABORT(result) ELSE_INFO("Fence wait");
result = device.m_Device.resetCommandPool(copyPool, {});
ERROR_IF(Failed(result), "Couldn't reset command pool. Cause: {}", result) THEN_ABORT(result);
device.m_Device.destroy(fence, nullptr);
staging.Destroy(&device);
}
// Persistent variables
vk::Viewport viewport = {
.x = 0,
.y = Cast<f32>(swapchain.m_Extent.height),
.width = Cast<f32>(swapchain.m_Extent.width),
.height = -Cast<f32>(swapchain.m_Extent.height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = swapchain.m_Extent,
};
vk::ImageSubresourceRange subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
@ -99,8 +186,8 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eColorAttachmentOptimal,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo topOfThePipeDependency = {
@ -114,8 +201,8 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eNone,
.oldLayout = vk::ImageLayout::eColorAttachmentOptimal,
.newLayout = vk::ImageLayout::ePresentSrcKHR,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo renderToPresentDependency = {
@ -123,39 +210,70 @@ main(int, char **)
.pImageMemoryBarriers = &renderToPresentBarrier,
};
// Frames
eastl::fixed_vector<Frame, MAX_FRAMES_IN_FLIGHT> frames;
for (u32 i = 0; i < MAX_FRAMES_IN_FLIGHT; ++i)
{
frames.emplace_back(&device, queueAllocation.m_Family, i);
}
INFO("Starting loop");
u32 frameIndex = 0;
while (window.Poll())
{
systems::Frame &currentFrame = device.GetNextFrame();
Frame *currentFrame = &frames[frameIndex];
Size2D swapchainSize = currentFrame.m_SwapchainSize;
auto result = device.m_Device.waitForFences(1, &currentFrame->m_FrameAvailableFence, true, MaxValue<u64>);
ERROR_IF(Failed(result), "Waiting for fence {} failed. Cause: {}", frameIndex, result)
THEN_ABORT(result);
vk::Viewport viewport = {
.x = 0,
.y = static_cast<f32>(swapchainSize.m_Height),
.width = static_cast<f32>(swapchainSize.m_Width),
.height = -static_cast<f32>(swapchainSize.m_Height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
u32 imageIndex;
result = device.m_Device.acquireNextImageKHR(swapchain.m_Swapchain, MaxValue<u64>,
currentFrame->m_ImageAcquireSem, nullptr, &imageIndex);
if (Failed(result))
{
switch (result)
{
case vk::Result::eErrorOutOfDateKHR:
case vk::Result::eSuboptimalKHR:
INFO("Recreating Swapchain. Cause: {}", result);
swapchain.Create(&surface, window.GetSize());
viewport.y = Cast<f32>(swapchain.m_Extent.height);
viewport.width = Cast<f32>(swapchain.m_Extent.width);
viewport.height = -Cast<f32>(swapchain.m_Extent.height);
scissor.extent = swapchain.m_Extent;
continue; // Image acquire has failed. We move to the next frame.
default:
ERROR("Waiting for swapchain image {} failed. Cause: {}", frameIndex, result)
THEN_ABORT(result);
}
}
// Reset fences here. In case swapchain was out of date, we leave the fences signalled.
result = device.m_Device.resetFences(1, &currentFrame->m_FrameAvailableFence);
ERROR_IF(Failed(result), "Fence {} reset failed. Cause: {}", frameIndex, result)
THEN_ABORT(result);
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = static_cast<vk::Extent2D>(swapchainSize),
};
result = device.m_Device.resetCommandPool(currentFrame->m_Pool, {});
ERROR_IF(Failed(result), "Command pool {} reset failed. Cause: {}", frameIndex, result)
THEN_ABORT(result);
auto context = currentFrame.CreateGraphicsContext();
vk::ImageView currentImageView = swapchain.m_ImageViews[imageIndex];
vk::Image currentImage = swapchain.m_Images[imageIndex];
vk::CommandBuffer cmd = currentFrame->m_CommandBuffer;
topOfThePipeBarrier.image = currentFrame.m_SwapchainImage;
renderToPresentBarrier.image = currentFrame.m_SwapchainImage;
topOfThePipeBarrier.image = currentImage;
renderToPresentBarrier.image = currentImage;
context.Begin();
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
result = cmd.begin(&beginInfo);
ERROR_IF(Failed(result), "Command buffer begin failed. Cause: {}", result)
THEN_ABORT(result);
context.Dependency(topOfThePipeDependency);
cmd.pipelineBarrier2(&topOfThePipeDependency);
// Render
vk::RenderingAttachmentInfo attachmentInfo = {
.imageView = currentFrame.m_SwapchainImageView,
.imageView = currentImageView,
.imageLayout = vk::ImageLayout::eColorAttachmentOptimal,
.resolveMode = vk::ResolveModeFlagBits::eNone,
.loadOp = vk::AttachmentLoadOp::eClear,
@ -164,29 +282,265 @@ main(int, char **)
};
vk::RenderingInfo renderingInfo = {
.renderArea = scissor,
.renderArea = {.extent = swapchain.m_Extent},
.layerCount = 1,
.colorAttachmentCount = 1,
.pColorAttachments = &attachmentInfo,
};
context.BeginRendering(renderingInfo);
cmd.beginRendering(&renderingInfo);
context.SetViewport(viewport);
context.BindPipeline(pipeline);
context.BindVertexBuffer(vbo);
context.Draw(3);
cmd.setViewport(0, 1, &viewport);
cmd.setScissor(0, 1, &scissor);
cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
usize offsets = 0;
cmd.bindVertexBuffers(0, 1, &vbo.m_Buffer, &offsets);
cmd.draw(3, 1, 0, 0);
context.EndRendering();
cmd.endRendering();
context.Dependency(renderToPresentDependency);
cmd.pipelineBarrier2(&renderToPresentDependency);
context.End();
result = cmd.end();
ERROR_IF(Failed(result), "Command buffer end failed. Cause: {}", result)
THEN_ABORT(result);
device.Present(currentFrame, context);
vk::PipelineStageFlags waitDstStage = vk::PipelineStageFlagBits::eColorAttachmentOutput;
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame->m_ImageAcquireSem,
.pWaitDstStageMask = &waitDstStage,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
.signalSemaphoreCount = 1,
.pSignalSemaphores = &currentFrame->m_RenderFinishSem,
};
result = commandQueue.submit(1, &submitInfo, currentFrame->m_FrameAvailableFence);
ERROR_IF(Failed(result), "Command queue submit failed. Cause: {}", result)
THEN_ABORT(result);
vk::PresentInfoKHR presentInfo = {
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame->m_RenderFinishSem,
.swapchainCount = 1,
.pSwapchains = &swapchain.m_Swapchain,
.pImageIndices = &imageIndex,
.pResults = nullptr,
};
result = commandQueue.presentKHR(&presentInfo);
if (Failed(result))
{
switch (result)
{
case vk::Result::eErrorOutOfDateKHR:
case vk::Result::eSuboptimalKHR:
INFO("Recreating Swapchain. Cause: {}", result);
swapchain.Create(&surface, window.GetSize());
viewport.y = Cast<f32>(swapchain.m_Extent.height);
viewport.width = Cast<f32>(swapchain.m_Extent.width);
viewport.height = -Cast<f32>(swapchain.m_Extent.height);
scissor.extent = swapchain.m_Extent;
break; // Present failed. We redo the frame.
default:
ERROR("Command queue present failed. Cause: {}", result)
THEN_ABORT(result);
}
}
frameIndex = (frameIndex + 1) % MAX_FRAMES_IN_FLIGHT;
}
device.WaitIdle();
device.m_Device.destroy(copyPool, nullptr);
vbo.Destroy(&device);
return 0;
}
Frame::Frame(const Device *device, const u32 queueFamilyIndex, const u32 frameCount)
{
m_Device = device;
const vk::CommandPoolCreateInfo commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueFamilyIndex,
};
vk::Result result = device->m_Device.createCommandPool(&commandPoolCreateInfo, nullptr, &m_Pool);
ERROR_IF(Failed(result), "Could not command pool for frame {}. Cause: {}", frameCount, result)
THEN_ABORT(result);
constexpr vk::FenceCreateInfo fenceCreateInfo = {.flags = vk::FenceCreateFlagBits::eSignaled};
result = device->m_Device.createFence(&fenceCreateInfo, nullptr, &m_FrameAvailableFence);
ERROR_IF(Failed(result), "Could not create a fence for frame {}. Cause: {}", frameCount, result)
THEN_ABORT(result);
constexpr vk::SemaphoreCreateInfo semaphoreCreateInfo = {};
result = device->m_Device.createSemaphore(&semaphoreCreateInfo, nullptr, &m_ImageAcquireSem);
ERROR_IF(Failed(result), "Could not create IA semaphore for frame {}. Cause: {}", frameCount, result)
THEN_ABORT(result);
result = device->m_Device.createSemaphore(&semaphoreCreateInfo, nullptr, &m_RenderFinishSem);
ERROR_IF(Failed(result), "Could not create RF semaphore for frame {}. Cause: {}", frameCount, result)
THEN_ABORT(result);
const vk::CommandBufferAllocateInfo allocateInfo = {
.commandPool = m_Pool, .level = vk::CommandBufferLevel::ePrimary, .commandBufferCount = 1};
result = m_Device->m_Device.allocateCommandBuffers(&allocateInfo, &m_CommandBuffer);
ERROR_IF(Failed(result), "Command buffer allocation failed. Cause: {}", result)
THEN_ABORT(result);
DEBUG("Frame {} created successfully.", frameCount);
}
Pipeline
CreatePipeline(const Device *device, const Swapchain *swapchain)
{
// Pipeline Setup
auto vertexShaderModule = CreateShader(device, VERTEX_SHADER_FILE);
auto fragmentShaderModule = CreateShader(device, FRAGMENT_SHADER_FILE);
eastl::array<vk::PipelineShaderStageCreateInfo, 2> shaderStages = {{
{
.stage = vk::ShaderStageFlagBits::eVertex,
.module = vertexShaderModule,
.pName = "main",
},
{
.stage = vk::ShaderStageFlagBits::eFragment,
.module = fragmentShaderModule,
.pName = "main",
},
}};
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
.setLayoutCount = 0,
.pSetLayouts = nullptr,
.pushConstantRangeCount = 0,
.pPushConstantRanges = nullptr,
};
vk::PipelineLayout pipelineLayout;
vk::Result result = device->m_Device.createPipelineLayout(&pipelineLayoutCreateInfo, nullptr, &pipelineLayout);
ERROR_IF(Failed(result), "Could not create a pipeline layout. Cause: {}", result) THEN_ABORT(result);
device->SetName(pipelineLayout, "Triangle Layout");
vk::VertexInputBindingDescription inputBindingDescription = Vertex::GetBinding(0);
auto inputAttributeDescription = Vertex::GetAttributes(0);
vk::PipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {
.vertexBindingDescriptionCount = 1,
.pVertexBindingDescriptions = &inputBindingDescription,
.vertexAttributeDescriptionCount = Cast<u32>(inputAttributeDescription.size()),
.pVertexAttributeDescriptions = inputAttributeDescription.data(),
};
vk::PipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
.topology = vk::PrimitiveTopology::eTriangleList,
.primitiveRestartEnable = false,
};
vk::PipelineViewportStateCreateInfo viewportStateCreateInfo = {
.viewportCount = 1,
.scissorCount = 1,
};
vk::PipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
.depthClampEnable = false,
.rasterizerDiscardEnable = false,
.polygonMode = vk::PolygonMode::eFill,
.cullMode = vk::CullModeFlagBits::eNone,
.frontFace = vk::FrontFace::eCounterClockwise,
.depthBiasEnable = false,
.lineWidth = 1.0,
};
vk::PipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
.rasterizationSamples = vk::SampleCountFlagBits::e1,
.sampleShadingEnable = false,
};
vk::PipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo = {
.depthTestEnable = false,
.depthWriteEnable = false,
};
vk::PipelineColorBlendAttachmentState colorBlendAttachmentState = {
.blendEnable = false,
.srcColorBlendFactor = vk::BlendFactor::eSrcColor,
.dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcColor,
.colorBlendOp = vk::BlendOp::eAdd,
.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha,
.dstAlphaBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha,
.alphaBlendOp = vk::BlendOp::eAdd,
.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA,
};
vk::PipelineColorBlendStateCreateInfo colorBlendStateCreateInfo = {
.logicOpEnable = false,
.attachmentCount = 1,
.pAttachments = &colorBlendAttachmentState,
};
eastl::array dynamicStates = {
vk::DynamicState::eScissor,
vk::DynamicState::eViewport,
};
vk::PipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
.dynamicStateCount = Cast<u32>(dynamicStates.size()),
.pDynamicStates = dynamicStates.data(),
};
vk::PipelineRenderingCreateInfo renderingCreateInfo = {
.viewMask = 0,
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &swapchain->m_Format,
};
vk::GraphicsPipelineCreateInfo pipelineCreateInfo = {
.pNext = &renderingCreateInfo,
.stageCount = Cast<u32>(shaderStages.size()),
.pStages = shaderStages.data(),
.pVertexInputState = &vertexInputStateCreateInfo,
.pInputAssemblyState = &inputAssemblyStateCreateInfo,
.pViewportState = &viewportStateCreateInfo,
.pRasterizationState = &rasterizationStateCreateInfo,
.pMultisampleState = &multisampleStateCreateInfo,
.pDepthStencilState = &depthStencilStateCreateInfo,
.pColorBlendState = &colorBlendStateCreateInfo,
.pDynamicState = &dynamicStateCreateInfo,
.layout = pipelineLayout,
};
vk::Pipeline pipeline;
result = device->m_Device.createGraphicsPipelines(nullptr, 1, &pipelineCreateInfo, nullptr, &pipeline);
ERROR_IF(Failed(result), "Could not create a graphics pipeline. Cause: {}", result)
THEN_ABORT(result);
device->SetName(pipeline, "Triangle Pipeline");
device->m_Device.destroy(vertexShaderModule, nullptr);
device->m_Device.destroy(fragmentShaderModule, nullptr);
return {device, pipelineLayout, pipeline, {}};
}
vk::ShaderModule
CreateShader(const Device *device, cstr shaderFile)
{
eastl::vector<u32> shaderCode = ReadFile(shaderFile);
const vk::ShaderModuleCreateInfo shaderModuleCreateInfo = {
.codeSize = shaderCode.size() * sizeof(u32),
.pCode = shaderCode.data(),
};
vk::ShaderModule shaderModule;
vk::Result result = device->m_Device.createShaderModule(&shaderModuleCreateInfo, nullptr, &shaderModule);
ERROR_IF(Failed(result), "Shader {} could not be created. Cause: {}", shaderFile, result)
THEN_ABORT(result);
return shaderModule;
}
Frame::~Frame()
{
m_Device->m_Device.destroy(m_RenderFinishSem, nullptr);
m_Device->m_Device.destroy(m_ImageAcquireSem, nullptr);
m_Device->m_Device.destroy(m_FrameAvailableFence, nullptr);
m_Device->m_Device.destroy(m_Pool, nullptr);
DEBUG("Destoryed Frame");
}

View File

@ -5,8 +5,10 @@ cmake_minimum_required(VERSION 3.13)
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined -fsanitize=address")
add_executable(box "box.cpp" "stb_image.h")
add_shader(box "shader/box.slang")
add_resource_dir(box "shader/")
add_shader(box "shader/box.vert.glsl")
add_shader(box "shader/box.frag.glsl")
add_shader(box "shader/box.vs.hlsl")
add_shader(box "shader/box.ps.hlsl")
target_link_libraries(box PRIVATE aster_core)
target_link_libraries(box PRIVATE util_helper)

View File

@ -1,29 +1,33 @@
// =============================================
// Aster: box.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "aster/aster.h"
#include "aster/core/buffer.h"
#include "aster/core/constants.h"
#include "aster/core/context.h"
#include "aster/core/device.h"
#include "aster/core/image.h"
#include "aster/core/physical_device.h"
#include "aster/core/pipeline.h"
#include "aster/core/swapchain.h"
#include "aster/core/window.h"
#include "helpers.h"
#define STB_IMAGE_IMPLEMENTATION
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
#include "aster/util/files.h"
#include "aster/systems/buffer_manager.h"
#include "aster/systems/image_manager.h"
#include "frame.h"
#include "stb_image.h"
#include <EASTL/array.h>
constexpr u32 MAX_FRAMES_IN_FLIGHT = 3;
constexpr auto VERTEX_SHADER_FILE = "shader/box.vs.hlsl.spv";
constexpr auto FRAGMENT_SHADER_FILE = "shader/box.ps.hlsl.spv";
constexpr auto SHADER_FILE = "box";
struct ImageFile
{
@ -34,12 +38,6 @@ struct ImageFile
bool Load(cstr fileName);
[[nodiscard]] usize GetSize() const;
operator eastl::span<u8>() const
{
return {static_cast<u8 *>(m_Data), GetSize()};
}
~ImageFile();
};
@ -65,7 +63,7 @@ ImageFile::Load(cstr fileName)
usize
ImageFile::GetSize() const
{
return static_cast<usize>(m_Width) * m_Height * m_NumChannels;
return Cast<usize>(m_Width) * m_Height * m_NumChannels;
}
ImageFile::~ImageFile()
@ -74,6 +72,9 @@ ImageFile::~ImageFile()
m_Data = nullptr;
}
vk::ShaderModule CreateShader(const Device *device, cstr shaderFile);
Pipeline CreatePipeline(const Device *device, const Swapchain *swapchain);
struct Vertex
{
vec3 m_Position;
@ -92,55 +93,86 @@ struct Camera
int
main(int, char **)
{
MIN_LOG_LEVEL(Logger::LogType::eDebug);
MIN_LOG_LEVEL(Logger::LogType::eInfo);
Window window = {"Box (Aster)", {640, 480}};
Context context = {"Box", VERSION};
Surface surface = {&context, &window, "Primary"};
PhysicalDevices physicalDevices = {&surface, &context};
PhysicalDevice deviceToUse = FindSuitableDevice(physicalDevices);
INFO("Using {} as the primary device.", deviceToUse.m_DeviceProperties.deviceName.data());
Features enabledDeviceFeatures = {
.m_Vulkan10Features = {.samplerAnisotropy = true},
.m_Vulkan12Features =
{
.descriptorIndexing = true,
.shaderSampledImageArrayNonUniformIndexing = true,
.shaderStorageBufferArrayNonUniformIndexing = true,
.shaderStorageImageArrayNonUniformIndexing = true,
.descriptorBindingUniformBufferUpdateAfterBind = true, // Not related to Bindless
.descriptorBindingSampledImageUpdateAfterBind = true,
.descriptorBindingStorageImageUpdateAfterBind = true,
.descriptorBindingStorageBufferUpdateAfterBind = true,
.descriptorBindingPartiallyBound = true,
.runtimeDescriptorArray = true,
.timelineSemaphore = true,
.bufferDeviceAddress = true,
.bufferDeviceAddressCaptureReplay = true,
},
.m_Vulkan12Features = {.bufferDeviceAddress = true},
.m_Vulkan13Features = {.synchronization2 = true, .dynamicRendering = true},
};
QueueAllocation queueAllocation = FindAppropriateQueueAllocation(&deviceToUse);
Device device = {&context, &deviceToUse, &enabledDeviceFeatures, {queueAllocation}, "Primary Device"};
vk::Queue commandQueue = device.GetQueue(queueAllocation.m_Family, 0);
Swapchain swapchain = {&surface, &device, window.GetSize(), "Primary Chain"};
Pipeline pipeline = CreatePipeline(&device, &swapchain);
systems::RenderingDevice device{{
.m_Window = window,
.m_Features = enabledDeviceFeatures,
.m_AppName = "Box",
.m_AppVersion = VERSION,
.m_ShaderSearchPaths = {"shader/"},
}};
systems::BufferManager bufferManager{&device, 12, 0};
systems::ImageManager imageManager{&device, 12, 1};
Pipeline pipeline;
auto pipelineResult =
device.CreateGraphicsPipeline(pipeline, {.m_Shaders = {
{.m_ShaderFile = SHADER_FILE, .m_EntryPoints = {"vsmain", "fsmain"}},
}});
ERROR_IF(pipelineResult, "Could not create pipeline. Cause: {}", pipelineResult.What())
THEN_ABORT(pipelineResult.Value());
auto swapchainSize = device.GetSwapchainSize();
Camera camera = {
.m_Model = {1.0f},
.m_View = lookAt(vec3(0.0f, 2.0f, 2.0f), vec3(0.0f), vec3(0.0f, 1.0f, 0.0f)),
.m_View = glm::lookAt(vec3(0.0f, 2.0f, 2.0f), vec3(0.0f), vec3(0.0f, 1.0f, 0.0f)),
.m_Perspective = glm::perspective(
70_deg, static_cast<f32>(swapchainSize.m_Width) / static_cast<f32>(swapchainSize.m_Height), 0.1f, 100.0f),
70_deg, Cast<f32>(swapchain.m_Extent.width) / Cast<f32>(swapchain.m_Extent.height), 0.1f, 100.0f),
};
vk::DescriptorPool descriptorPool;
vk::DescriptorSet descriptorSet;
{
vk::DescriptorSetLayout descriptorSetLayout = pipeline.m_SetLayouts.front();
eastl::array poolSizes = {
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 1,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = 1,
},
vk::DescriptorPoolSize{
.type = vk::DescriptorType::eStorageBuffer,
.descriptorCount = 1,
},
};
vk::DescriptorPoolCreateInfo descriptorPoolCreateInfo = {
.maxSets = 1, .poolSizeCount = Cast<u32>(poolSizes.size()), .pPoolSizes = poolSizes.data()};
AbortIfFailed(device.m_Device.createDescriptorPool(&descriptorPoolCreateInfo, nullptr, &descriptorPool));
vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
.descriptorPool = descriptorPool,
.descriptorSetCount = 1,
.pSetLayouts = &descriptorSetLayout,
};
AbortIfFailed(device.m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &descriptorSet));
}
vk::CommandPool copyPool;
vk::CommandBuffer copyBuffer;
{
vk::CommandPoolCreateInfo poolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueAllocation.m_Family,
};
AbortIfFailedM(device.m_Device.createCommandPool(&poolCreateInfo, nullptr, &copyPool),
"Copy command pool creation failed.");
vk::CommandBufferAllocateInfo bufferAllocateInfo = {
.commandPool = copyPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailedM(device.m_Device.allocateCommandBuffers(&bufferAllocateInfo, &copyBuffer),
"Copy command buffer allocation failed.");
}
eastl::array vertices = {
Vertex{.m_Position = vec3(0.5f, 0.5f, -0.5f), .m_TexCoord0 = vec2(1.0f, 1.0f)},
Vertex{.m_Position = vec3(0.5f, -0.5f, -0.5f), .m_TexCoord0 = vec2(1.0f, 0.0f)},
@ -190,19 +222,21 @@ main(int, char **)
assert(loaded);
INFO("Image {}x{} : {} channels", imageFile.m_Width, imageFile.m_Height, imageFile.m_NumChannels);
auto vbo = device.CreateStorageBuffer(vertices.size() * sizeof vertices[0], "Vertex Buffer");
vbo->Write(0, vertices.size() * sizeof vertices[0], vertices.data());
auto crate = device.CreateTexture2DWithView({
auto vbo = bufferManager.CreateStorageBuffer(vertices.size() * sizeof vertices[0], "Vertex Buffer").ToPointer();
auto crate = imageManager
.CreateTexture2D({
.m_Format = vk::Format::eR8G8B8A8Srgb,
.m_Extent = {imageFile.m_Width, imageFile.m_Height},
.m_Name = "Crate Texture",
});
})
.ToPointer();
vbo->Write(&device, 0, vertices.size() * sizeof vertices[0], vertices.data());
{
StagingBuffer imageStaging;
auto imageStaging = device.CreateStagingBuffer(imageFile.GetSize(), "Image Staging");
imageStaging->Write(0, imageFile.GetSize(), imageFile.m_Data);
imageStaging.Init(&device, imageFile.GetSize(), "Image Staging");
imageStaging.Write(&device, 0, imageFile.GetSize(), imageFile.m_Data);
vk::ImageMemoryBarrier2 imageReadyToWrite = {
.srcStageMask = vk::PipelineStageFlagBits2::eTransfer,
@ -211,9 +245,9 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.image = crate->GetImage(),
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.image = crate->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@ -235,9 +269,9 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
.srcQueueFamilyIndex = device.m_TransferQueueFamily,
.dstQueueFamilyIndex = device.m_PrimaryQueueFamily,
.image = crate->GetImage(),
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.image = crate->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@ -252,25 +286,140 @@ main(int, char **)
.pImageMemoryBarriers = &imageReadyToRead,
};
auto context = device.CreateTransferContext();
context.Begin();
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(device.m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
context.Dependency(imageReadyToWriteDependency);
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
AbortIfFailed(copyBuffer.begin(&beginInfo));
context.UploadTexture(crate->m_Image, imageFile);
copyBuffer.pipelineBarrier2(&imageReadyToWriteDependency);
context.Dependency(imageReadyToReadDependency);
vk::BufferImageCopy imageCopy = {
.bufferOffset = 0,
.bufferRowLength = imageFile.m_Width,
.bufferImageHeight = imageFile.m_Height,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = {imageFile.m_Width, imageFile.m_Height, 1},
};
copyBuffer.copyBufferToImage(imageStaging.m_Buffer, crate->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
&imageCopy);
context.End();
copyBuffer.pipelineBarrier2(&imageReadyToReadDependency);
auto recpt = device.Submit(context);
device.WaitOn(recpt);
AbortIfFailed(copyBuffer.end());
vk::SubmitInfo submitInfo = {
.commandBufferCount = 1,
.pCommandBuffers = &copyBuffer,
};
AbortIfFailed(commandQueue.submit(1, &submitInfo, fence));
INFO("Submit copy");
AbortIfFailed(device.m_Device.waitForFences(1, &fence, true, MaxValue<u64>));
INFO("Fence wait");
AbortIfFailedM(device.m_Device.resetCommandPool(copyPool, {}), "Couldn't reset command pool.");
device.m_Device.destroy(fence, nullptr);
imageStaging.Destroy(&device);
}
auto ubo = device.CreateStorageBuffer(sizeof camera, "Camera UBO");
ubo->Write(0, sizeof camera, &camera);
vk::Sampler sampler;
{
vk::SamplerCreateInfo samplerCreateInfo = {
.magFilter = vk::Filter::eLinear,
.minFilter = vk::Filter::eLinear,
.mipmapMode = vk::SamplerMipmapMode::eLinear,
.addressModeU = vk::SamplerAddressMode::eRepeat,
.addressModeV = vk::SamplerAddressMode::eRepeat,
.addressModeW = vk::SamplerAddressMode::eRepeat,
.mipLodBias = 0.2f,
.anisotropyEnable = true,
.maxAnisotropy = 1.0f,
.compareEnable = false,
.minLod = 0,
.maxLod = 4,
.unnormalizedCoordinates = false,
};
AbortIfFailed(device.m_Device.createSampler(&samplerCreateInfo, nullptr, &sampler));
}
auto ubo = bufferManager.CreateUniformBuffer(sizeof camera, "Camera UBO").ToPointer();
ubo->Write(&device, 0, sizeof camera, &camera);
vk::DescriptorBufferInfo descriptorBufferInfo = {
.buffer = ubo->m_Buffer,
.offset = 0,
.range = ubo->GetSize(),
};
vk::DescriptorImageInfo descriptorImageInfo = {
.sampler = sampler,
.imageView = crate->m_View,
.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
};
vk::DescriptorBufferInfo descriptorStorageBufferInfo = {
.buffer = vbo->m_Buffer,
.offset = 0,
.range = vbo->GetSize(),
};
eastl::array writeDescriptors = {
vk::WriteDescriptorSet{
.dstSet = descriptorSet,
.dstBinding = 0,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.pBufferInfo = &descriptorBufferInfo,
},
vk::WriteDescriptorSet{
.dstSet = descriptorSet,
.dstBinding = 1,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.pImageInfo = &descriptorImageInfo,
},
vk::WriteDescriptorSet{
.dstSet = descriptorSet,
.dstBinding = 2,
.dstArrayElement = 0,
.descriptorCount = 1,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.pBufferInfo = &descriptorStorageBufferInfo,
},
};
device.m_Device.updateDescriptorSets(Cast<u32>(writeDescriptors.size()), writeDescriptors.data(), 0, nullptr);
// Persistent variables
vk::Viewport viewport = {
.x = 0,
.y = Cast<f32>(swapchain.m_Extent.height),
.width = Cast<f32>(swapchain.m_Extent.width),
.height = -Cast<f32>(swapchain.m_Extent.height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = swapchain.m_Extent,
};
auto resizeViewportScissor = [&viewport, &scissor](vk::Extent2D extent) {
viewport.y = Cast<f32>(extent.height);
viewport.width = Cast<f32>(extent.width);
viewport.height = -Cast<f32>(extent.height);
scissor.extent = extent;
};
swapchain.RegisterResizeCallback(resizeViewportScissor);
vk::ImageSubresourceRange subresourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
@ -288,8 +437,8 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite,
.oldLayout = vk::ImageLayout::eUndefined,
.newLayout = vk::ImageLayout::eColorAttachmentOptimal,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo topOfThePipeDependency = {
@ -303,8 +452,8 @@ main(int, char **)
.dstAccessMask = vk::AccessFlagBits2::eNone,
.oldLayout = vk::ImageLayout::eColorAttachmentOptimal,
.newLayout = vk::ImageLayout::ePresentSrcKHR,
.srcQueueFamilyIndex = vk::QueueFamilyIgnored,
.dstQueueFamilyIndex = vk::QueueFamilyIgnored,
.srcQueueFamilyIndex = queueAllocation.m_Family,
.dstQueueFamilyIndex = queueAllocation.m_Family,
.subresourceRange = subresourceRange,
};
vk::DependencyInfo renderToPresentDependency = {
@ -312,85 +461,50 @@ main(int, char **)
.pImageMemoryBarriers = &renderToPresentBarrier,
};
eastl::fixed_vector<Ref<ImageView>, MAX_FRAMES_IN_FLIGHT> depthImages;
FrameManager frameManager = {&device, queueAllocation.m_Family, MAX_FRAMES_IN_FLIGHT};
eastl::fixed_vector<Ref<Image>, MAX_FRAMES_IN_FLIGHT> depthImages;
auto initDepthImages = [&depthImages, &device](vk::Extent2D const extent) {
for (u32 i = 0; i < MAX_FRAMES_IN_FLIGHT; ++i)
auto initDepthImages = [&imageManager, &depthImages, &frameManager] (const vk::Extent2D extent) {
for (u32 i = 0; i < frameManager.m_FramesInFlight; ++i)
{
depthImages.push_back(device.CreateDepthStencilImageWithView({.m_Extent = extent, .m_Name = "Depth"}));
depthImages.push_back(
imageManager.CreateDepthStencilImage({.m_Extent = extent, .m_Name = "Depth"}).ToPointer());
}
};
initDepthImages(swapchainSize);
initDepthImages(swapchain.m_Extent);
auto recreateDepthBuffers = [&depthImages, &initDepthImages](vk::Extent2D const extent) {
auto recreateDepthBuffers = [&depthImages, &initDepthImages](const vk::Extent2D extent) {
depthImages.clear();
initDepthImages(extent);
};
struct PCB
{
uptr m_VertexBuffer;
uptr m_Camera;
systems::ResId<TextureView> m_Texture;
};
static_assert(sizeof(PCB) == 24);
auto &commitManager = systems::CommitManager::Instance();
PCB pcb = {
.m_VertexBuffer = vbo->GetDeviceAddress(),
.m_Camera = ubo->GetDeviceAddress(),
.m_Texture = commitManager.CommitTexture(crate),
};
swapchain.RegisterResizeCallback(recreateDepthBuffers);
Time::Init();
auto prevSwapchainSize = swapchainSize;
INFO("Starting loop");
while (window.Poll())
{
Time::Update();
camera.m_Model *= rotate(mat4{1.0f}, static_cast<f32>(45.0_deg * Time::m_Delta), vec3(0.0f, 1.0f, 0.0f));
ubo->Write(0, sizeof camera, &camera);
camera.m_Model *= rotate(mat4{1.0f}, Cast<f32>(45.0_deg * Time::m_Delta), vec3(0.0f, 1.0f, 0.0f));
ubo->Write(&device, 0, sizeof camera, &camera);
auto &currentFrame = device.GetNextFrame();
Frame *currentFrame = frameManager.GetNextFrame(&swapchain, &surface, window.GetSize());
prevSwapchainSize = swapchainSize;
swapchainSize = currentFrame.m_SwapchainSize;
if (swapchainSize != prevSwapchainSize)
{
recreateDepthBuffers(swapchainSize);
}
vk::Viewport viewport = {
.x = 0,
.y = static_cast<f32>(swapchainSize.m_Height),
.width = static_cast<f32>(swapchainSize.m_Width),
.height = -static_cast<f32>(swapchainSize.m_Height),
.minDepth = 0.0,
.maxDepth = 1.0,
};
vk::Rect2D scissor = {
.offset = {0, 0},
.extent = static_cast<vk::Extent2D>(swapchainSize),
};
vk::ImageView currentImageView = currentFrame.m_SwapchainImageView;
vk::Image currentImage = currentFrame.m_SwapchainImage;
vk::ImageView currentDepthImageView = depthImages[currentFrame.m_FrameIdx]->m_View;
u32 imageIndex = currentFrame->m_ImageIdx;
vk::ImageView currentImageView = swapchain.m_ImageViews[imageIndex];
vk::Image currentImage = swapchain.m_Images[imageIndex];
vk::CommandBuffer cmd = currentFrame->m_CommandBuffer;
vk::ImageView currentDepthImageView = depthImages[currentFrame->m_FrameIdx]->m_View;
topOfThePipeBarrier.image = currentImage;
renderToPresentBarrier.image = currentImage;
auto context = currentFrame.CreateGraphicsContext();
vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
AbortIfFailed(cmd.begin(&beginInfo));
context.Begin();
context.Dependency(topOfThePipeDependency);
cmd.pipelineBarrier2(&topOfThePipeDependency);
// Render
eastl::array attachmentInfos = {
@ -414,30 +528,207 @@ main(int, char **)
};
vk::RenderingInfo renderingInfo = {
.renderArea = scissor,
.renderArea = {.extent = swapchain.m_Extent},
.layerCount = 1,
.colorAttachmentCount = static_cast<u32>(attachmentInfos.size()),
.colorAttachmentCount = Cast<u32>(attachmentInfos.size()),
.pColorAttachments = attachmentInfos.data(),
.pDepthAttachment = &depthAttachment,
};
context.BeginRendering(renderingInfo);
cmd.beginRendering(&renderingInfo);
context.SetViewport(viewport);
context.BindPipeline(pipeline);
context.PushConstantBlock(pcb);
context.Draw(vertices.size());
cmd.setViewport(0, 1, &viewport);
cmd.setScissor(0, 1, &scissor);
cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1, &descriptorSet, 0, nullptr);
cmd.draw(Cast<u32>(vertices.size()), 1, 0, 0);
context.EndRendering();
cmd.endRendering();
context.Dependency(renderToPresentDependency);
cmd.pipelineBarrier2(&renderToPresentDependency);
context.End();
AbortIfFailed(cmd.end());
device.Present(currentFrame, context);
vk::PipelineStageFlags waitDstStage = vk::PipelineStageFlagBits::eColorAttachmentOutput;
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 1,
.pWaitSemaphores = &currentFrame->m_ImageAcquireSem,
.pWaitDstStageMask = &waitDstStage,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
.signalSemaphoreCount = 1,
.pSignalSemaphores = &currentFrame->m_RenderFinishSem,
};
AbortIfFailed(commandQueue.submit(1, &submitInfo, currentFrame->m_FrameAvailableFence));
currentFrame->Present(commandQueue, &swapchain, &surface, window.GetSize());
}
device.WaitIdle();
device.m_Device.destroy(sampler, nullptr);
device.m_Device.destroy(descriptorPool, nullptr);
device.m_Device.destroy(copyPool, nullptr);
return 0;
}
Pipeline
CreatePipeline(const Device *device, const Swapchain *swapchain)
{
// Pipeline Setup
auto vertexShaderModule = CreateShader(device, VERTEX_SHADER_FILE);
auto fragmentShaderModule = CreateShader(device, FRAGMENT_SHADER_FILE);
eastl::array<vk::PipelineShaderStageCreateInfo, 2> shaderStages = {{
{
.stage = vk::ShaderStageFlagBits::eVertex,
.module = vertexShaderModule,
.pName = "main",
},
{
.stage = vk::ShaderStageFlagBits::eFragment,
.module = fragmentShaderModule,
.pName = "main",
},
}};
eastl::array descriptorSetLayoutBinding = {
vk::DescriptorSetLayoutBinding{
.binding = 0,
.descriptorType = vk::DescriptorType::eUniformBuffer,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eVertex,
},
vk::DescriptorSetLayoutBinding{
.binding = 1,
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eFragment,
},
vk::DescriptorSetLayoutBinding{
.binding = 2,
.descriptorType = vk::DescriptorType::eStorageBuffer,
.descriptorCount = 1,
.stageFlags = vk::ShaderStageFlagBits::eVertex,
},
};
vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
.bindingCount = Cast<u32>(descriptorSetLayoutBinding.size()),
.pBindings = descriptorSetLayoutBinding.data(),
};
vk::DescriptorSetLayout descriptorSetLayout;
AbortIfFailed(
device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &descriptorSetLayout));
vk::PipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
.setLayoutCount = 1,
.pSetLayouts = &descriptorSetLayout,
.pushConstantRangeCount = 0,
.pPushConstantRanges = nullptr,
};
vk::PipelineLayout pipelineLayout;
AbortIfFailed(device->m_Device.createPipelineLayout(&pipelineLayoutCreateInfo, nullptr, &pipelineLayout));
device->SetName(pipelineLayout, "Box Layout");
vk::PipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {};
vk::PipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
.topology = vk::PrimitiveTopology::eTriangleList,
.primitiveRestartEnable = false,
};
vk::PipelineViewportStateCreateInfo viewportStateCreateInfo = {
.viewportCount = 1,
.scissorCount = 1,
};
vk::PipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
.depthClampEnable = false,
.rasterizerDiscardEnable = false,
.polygonMode = vk::PolygonMode::eFill,
.cullMode = vk::CullModeFlagBits::eNone,
.frontFace = vk::FrontFace::eCounterClockwise,
.depthBiasEnable = false,
.lineWidth = 1.0,
};
vk::PipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
.rasterizationSamples = vk::SampleCountFlagBits::e1,
.sampleShadingEnable = false,
};
vk::PipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo = {
.depthTestEnable = true,
.depthWriteEnable = true,
.depthCompareOp = vk::CompareOp::eLess,
};
vk::PipelineColorBlendAttachmentState colorBlendAttachmentState = {
.blendEnable = false,
.srcColorBlendFactor = vk::BlendFactor::eSrcColor,
.dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcColor,
.colorBlendOp = vk::BlendOp::eAdd,
.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha,
.dstAlphaBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha,
.alphaBlendOp = vk::BlendOp::eAdd,
.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG |
vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA,
};
vk::PipelineColorBlendStateCreateInfo colorBlendStateCreateInfo = {
.logicOpEnable = false,
.attachmentCount = 1,
.pAttachments = &colorBlendAttachmentState,
};
eastl::array dynamicStates = {
vk::DynamicState::eScissor,
vk::DynamicState::eViewport,
};
vk::PipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
.dynamicStateCount = Cast<u32>(dynamicStates.size()),
.pDynamicStates = dynamicStates.data(),
};
vk::PipelineRenderingCreateInfo renderingCreateInfo = {
.viewMask = 0,
.colorAttachmentCount = 1,
.pColorAttachmentFormats = &swapchain->m_Format,
.depthAttachmentFormat = vk::Format::eD24UnormS8Uint,
};
vk::GraphicsPipelineCreateInfo pipelineCreateInfo = {
.pNext = &renderingCreateInfo,
.stageCount = Cast<u32>(shaderStages.size()),
.pStages = shaderStages.data(),
.pVertexInputState = &vertexInputStateCreateInfo,
.pInputAssemblyState = &inputAssemblyStateCreateInfo,
.pViewportState = &viewportStateCreateInfo,
.pRasterizationState = &rasterizationStateCreateInfo,
.pMultisampleState = &multisampleStateCreateInfo,
.pDepthStencilState = &depthStencilStateCreateInfo,
.pColorBlendState = &colorBlendStateCreateInfo,
.pDynamicState = &dynamicStateCreateInfo,
.layout = pipelineLayout,
};
vk::Pipeline pipeline;
AbortIfFailed(device->m_Device.createGraphicsPipelines(nullptr, 1, &pipelineCreateInfo, nullptr, &pipeline));
device->SetName(pipeline, "Box Pipeline");
device->m_Device.destroy(vertexShaderModule, nullptr);
device->m_Device.destroy(fragmentShaderModule, nullptr);
return {device, pipelineLayout, pipeline, {descriptorSetLayout}};
}
vk::ShaderModule
CreateShader(const Device *device, cstr shaderFile)
{
eastl::vector<u32> shaderCode = ReadFile(shaderFile);
const vk::ShaderModuleCreateInfo shaderModuleCreateInfo = {
.codeSize = shaderCode.size() * sizeof(u32),
.pCode = shaderCode.data(),
};
vk::ShaderModule shaderModule;
AbortIfFailedMV(device->m_Device.createShaderModule(&shaderModuleCreateInfo, nullptr, &shaderModule),
"Shader {} could not be created.", shaderFile);
return shaderModule;
}

View File

@ -1,23 +0,0 @@
[vk::binding(0, 0)] __DynamicResource<__DynamicResourceKind.General> gBuffers[];
[vk::binding(1, 0)] __DynamicResource<__DynamicResourceKind.Sampler> gSamplers[];
[vk::binding(2, 0)] __DynamicResource<__DynamicResourceKind.General> gStorageTextures[];
export T getDescriptorFromHandle<T>(DescriptorHandle<T> handle) where T : IOpaqueDescriptor
{
__target_switch
{
case spirv:
switch (T.kind) {
case DescriptorKind.Buffer:
return gBuffers[((uint2)handle).x].asOpaqueDescriptor<T>();
case DescriptorKind.CombinedTextureSampler:
return gSamplers[((uint2)handle).x].asOpaqueDescriptor<T>();
case DescriptorKind.Texture:
return gStorageTextures[((uint2)handle).x].asOpaqueDescriptor<T>();
default:
return defaultGetDescriptorFromHandle(handle);
}
default:
return defaultGetDescriptorFromHandle(handle);
}
}

View File

@ -0,0 +1,11 @@
#version 450
#pragma shader_stage(fragment)
layout (location = 0) in vec2 inUV;
layout (location = 0) out vec4 outColor;
layout(binding = 1) uniform sampler2D tex;
void main() {
outColor = vec4(texture(tex, inUV).rgb, 1.0f);
}

View File

@ -0,0 +1,19 @@
struct FS_Input {
float2 UV0 : TEXCOORD0;
};
struct FS_Output
{
float4 ColorTarget : SV_Target0;
};
[[vk::binding(1, 0)]] Texture2D<float4> Texture;
[[vk::binding(1, 0)]] SamplerState Sampler;
FS_Output main(FS_Input StageInput) {
FS_Output output;
output.ColorTarget = float4(Texture.Sample(Sampler, StageInput.UV0).rgb, 1.0);
return output;
}

View File

@ -1,57 +0,0 @@
import bindless;
struct VertexData
{
float4 position;
float2 texCoord0;
float2 _pad0;
};
struct CameraData
{
float4x4 model;
float4x4 view;
float4x4 projection;
};
struct PCB {
VertexData* vertexBuffer;
CameraData* cameraBuffer;
Sampler2D.Handle texture;
};
[vk::push_constant]
uniform PCB pcb;
struct VSIn {
uint vertexIndex : SV_VertexID;
};
struct VSOut
{
float4 position : SV_POSITION;
float2 texCoord0 : TEXCOORD0;
};
struct FSOut {
float4 Color;
};
[shader("vertex")]
func vsmain(VSIn input) -> VSOut {
VSOut output;
VertexData vd = pcb.vertexBuffer[input.vertexIndex];
output.position = mul(mul(mul(float4(vd.position.xyz, 1.0f), pcb.cameraBuffer->model), pcb.cameraBuffer->view), pcb.cameraBuffer->projection);
output.texCoord0 = vd.texCoord0;
return output;
}
[shader("fragment")]
func fsmain(VSOut input) -> FSOut {
FSOut outp;
outp.Color = float4(pcb.texture.Sample(input.texCoord0).rgb, 1.0);
return outp;
}

View File

@ -0,0 +1,19 @@
#version 450
#pragma shader_stage(vertex)
layout(location=0) in vec4 position;
layout(location=1) in vec2 uv0;
layout(location=0) out vec2 outUV;
layout(binding=0) uniform Camera {
mat4 model;
mat4 view;
mat4 proj;
} ubo;
void main() {
outUV = uv0;
gl_Position = ubo.proj * ubo.view * ubo.model * vec4(position.xyz, 1.0f);
// outColor = vec3(0.5f, 0.3f, 0.1f);
}

View File

@ -0,0 +1,36 @@
struct VS_Input
{
uint VertexIndex : SV_VertexID;
};
struct VS_Output
{
float2 UV0 : TEXCOORD0;
float4 VertexPosition : SV_Position;
};
struct CameraData {
float4x4 Model;
float4x4 View;
float4x4 Projection;
};
struct VertexData {
float4 Position;
float2 UV0;
};
[[vk::binding(0, 0)]] ConstantBuffer<CameraData> Camera;
[[vk::binding(2, 0)]] StructuredBuffer<VertexData> Vertices;
VS_Output main(VS_Input StageInput) {
VS_Output output;
output.UV0 = Vertices[StageInput.VertexIndex].UV0;
float4 position = Vertices[StageInput.VertexIndex].Position;
output.VertexPosition = mul(Camera.Projection, mul(Camera.View, mul(Camera.Model, position)));
return output;
}

View File

@ -5,25 +5,28 @@ cmake_minimum_required(VERSION 3.13)
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined -fsanitize=address")
find_path(TINYGLTF_INCLUDE_DIRS "tiny_gltf.h")
add_executable(model_render
"model_render.cpp"
add_executable(model_render "model_render.cpp"
"pipeline_utils.cpp"
"pipeline_utils.h"
"asset_loader.cpp"
"asset_loader.h"
"light_manager.cpp"
"light_manager.h"
"gpu_resource_manager.cpp"
"gpu_resource_manager.h"
"nodes.cpp"
"nodes.h"
"ibl_helpers.cpp"
"ibl_helpers.h"
"tiny_gltf_setup.cpp")
"ibl_helpers.h")
add_shader(model_render "shader/background.slang")
add_shader(model_render "shader/bindless.slang")
add_shader(model_render "shader/common_structs.slang")
add_shader(model_render "shader/environment.slang")
add_shader(model_render "shader/eqrect_to_cube.slang")
add_shader(model_render "shader/ibl_common.slang")
add_shader(model_render "shader/model.slang")
add_shader(model_render "shader/model.vs.hlsl")
add_shader(model_render "shader/model.ps.hlsl")
add_shader(model_render "shader/eqrect_to_cube.cs.hlsl")
add_shader(model_render "shader/background.vs.hlsl")
add_shader(model_render "shader/background.ps.hlsl")
add_shader(model_render "shader/diffuse_irradiance.cs.hlsl")
add_shader(model_render "shader/prefilter.cs.hlsl")
add_shader(model_render "shader/brdf_lut.cs.hlsl")
target_link_libraries(model_render PRIVATE aster_core)
target_link_libraries(model_render PRIVATE util_helper)

View File

@ -1,26 +1,27 @@
// =============================================
// Aster: asset_loader.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#define TINYGLTF_NOEXCEPTION
#define JSON_NOEXCEPTION
#define TINYGLTF_IMPLEMENTATION
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "aster/core/buffer.h"
#include "aster/core/device.h"
#include "aster/core/image.h"
#include "asset_loader.h"
#include "gpu_resource_manager.h"
#include "helpers.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
#include "asset_loader.h"
#include <EASTL/fixed_vector.h>
#include <EASTL/hash_map.h>
#include <glm/gtc/type_ptr.hpp>
#include <filesystem>
#include <stb_image.h>
#include <tiny_gltf.h>
#if defined(LoadImage)
@ -30,29 +31,19 @@
constexpr vk::CommandBufferBeginInfo OneTimeCmdBeginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
vec4
VectorToVec4(std::vector<double> const &vec)
VectorToVec4(const std::vector<double> &vec)
{
if (vec.empty())
{
return vec4{0.0f};
}
assert(vec.size() == 4);
return {vec[0], vec[1], vec[2], vec[3]};
}
vec4
VectorToVec4(std::vector<double> const &vec, float w)
{
if (vec.empty())
{
return vec4{0.0f};
}
assert(vec.size() == 3);
return {vec[0], vec[1], vec[2], w};
}
vec3
VectorToVec3(std::vector<double> const &vec)
VectorToVec3(const std::vector<double> &vec)
{
if (vec.empty())
{
@ -63,28 +54,51 @@ VectorToVec3(std::vector<double> const &vec)
return {vec[0], vec[1], vec[2]};
}
Ref<TextureView>
AssetLoader::LoadHdrImage(cstr path, cstr name) const
void
AssetLoader::LoadHdrImage(Texture *texture, cstr path, cstr name) const
{
const Device *pDevice = m_ResourceManager->m_Device;
ERROR_IF(texture->IsValid(), "Expected invalid image.") THEN_ABORT(-1);
i32 x, y, nChannels;
f32 *data = stbi_loadf(path, &x, &y, &nChannels, 4);
assert(nChannels == 3);
ERROR_IF(!data, "Could not load {}", path) THEN_ABORT(-1);
u32 width = static_cast<u32>(x);
u32 height = static_cast<u32>(y);
u32 width = Cast<u32>(x);
u32 height = Cast<u32>(y);
auto texture = m_Device->CreateTexture2DWithView({
.m_Format = vk::Format::eR32G32B32A32Sfloat,
.m_Extent = {width, height},
.m_Name = path,
.m_IsSampled = true,
.m_IsMipMapped = false,
.m_IsStorage = false,
});
StagingBuffer stagingBuffer;
texture->Init(m_ResourceManager->m_Device, {width, height}, vk::Format::eR32G32B32A32Sfloat, false, path);
assert(texture->IsValid());
stagingBuffer.Init(m_ResourceManager->m_Device, (sizeof *data) * x * y * 4, "HDR Staging Buffer");
stagingBuffer.Write(m_ResourceManager->m_Device, 0, stagingBuffer.GetSize(), data);
stbi_image_free(data);
#pragma region Setup Copy/Sync primitives
vk::BufferImageCopy2 copyRegion = {
.bufferOffset = 0,
.bufferRowLength = width,
.bufferImageHeight = height,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {0, 0, 0},
.imageExtent = texture->m_Extent,
};
vk::CopyBufferToImageInfo2 stagingInfo = {
.srcBuffer = stagingBuffer.m_Buffer,
.dstImage = texture->m_Image,
.dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
.regionCount = 1,
.pRegions = &copyRegion,
};
vk::ImageMemoryBarrier2 readyToStageBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllCommands,
.srcAccessMask = vk::AccessFlagBits2::eNone,
@ -94,7 +108,7 @@ AssetLoader::LoadHdrImage(cstr path, cstr name) const
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture->GetImage(),
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@ -117,9 +131,9 @@ AssetLoader::LoadHdrImage(cstr path, cstr name) const
.dstAccessMask = vk::AccessFlagBits2::eShaderRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
.srcQueueFamilyIndex = m_Device->m_TransferQueueFamily,
.dstQueueFamilyIndex = m_Device->m_PrimaryQueueFamily,
.image = texture->GetImage(),
.srcQueueFamilyIndex = m_TransferQueueIndex,
.dstQueueFamilyIndex = m_GraphicsQueueIndex,
.image = texture->m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@ -137,31 +151,49 @@ AssetLoader::LoadHdrImage(cstr path, cstr name) const
};
#pragma endregion
auto context = m_Device->CreateTransferContext();
context.Begin();
AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo));
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += name ? name : path;
context.BeginDebugRegion(loadActionName.c_str());
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
context.Dependency(readyToStageDependency);
context.UploadTexture(texture->m_Image, {reinterpret_cast<u8 *>(data), (sizeof *data) * x * y * 4});
context.Dependency(postStagingDependency);
m_CommandBuffer.pipelineBarrier2(&readyToStageDependency);
m_CommandBuffer.copyBufferToImage2(&stagingInfo);
m_CommandBuffer.pipelineBarrier2(&postStagingDependency);
context.EndDebugRegion();
#if !defined(ASTER_NDEBUG)
m_CommandBuffer.endDebugUtilsLabelEXT();
#endif
context.End();
AbortIfFailed(m_CommandBuffer.end());
auto rcpt = m_Device->Submit(context);
stbi_image_free(data);
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &m_CommandBuffer,
};
m_Device->WaitOn(rcpt);
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u32>));
pDevice->m_Device.destroy(fence, nullptr);
return texture;
AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {}));
stagingBuffer.Destroy(pDevice);
}
void
GenerateMipMaps(systems::TransferContext &context, Ref<Texture> const &texture, vk::ImageLayout initialLayout,
GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayout initialLayout,
vk::ImageLayout finalLayout, vk::PipelineStageFlags2 prevStage, vk::PipelineStageFlags2 finalStage)
{
#if !defined(ASTER_NDEBUG)
@ -169,7 +201,7 @@ GenerateMipMaps(systems::TransferContext &context, Ref<Texture> const &texture,
.pLabelName = "Generate Mipmap",
.color = std::array{0.9f, 0.9f, 0.9f, 1.0f},
};
context.BeginDebugRegion("Generate MipMap", {0.9, 0.9, 0.9, 1.0});
commandBuffer.beginDebugUtilsLabelEXT(&label);
#endif
vk::ImageMemoryBarrier2 imageStartBarrier = {
@ -211,7 +243,7 @@ GenerateMipMaps(systems::TransferContext &context, Ref<Texture> const &texture,
}
vk::DependencyInfo imageStartDependency = {
.imageMemoryBarrierCount = static_cast<u32>(startBarriers.size()),
.imageMemoryBarrierCount = Cast<u32>(startBarriers.size()),
.pImageMemoryBarriers = startBarriers.data(),
};
@ -292,10 +324,10 @@ GenerateMipMaps(systems::TransferContext &context, Ref<Texture> const &texture,
// Mip Mapping
context.Dependency(imageStartDependency);
commandBuffer.pipelineBarrier2(&imageStartDependency);
i32 prevMipWidth = static_cast<i32>(texture->m_Extent.width);
i32 prevMipHeight = static_cast<i32>(texture->m_Extent.height);
i32 prevMipWidth = Cast<i32>(texture->m_Extent.width);
i32 prevMipHeight = Cast<i32>(texture->m_Extent.height);
u32 maxPrevMip = texture->GetMipLevels() - 1;
for (u32 prevMipLevel = 0; prevMipLevel < maxPrevMip; ++prevMipLevel)
@ -317,50 +349,47 @@ GenerateMipMaps(systems::TransferContext &context, Ref<Texture> const &texture,
nextMipBarrier.subresourceRange.baseMipLevel = currentMipLevel;
context.Blit(mipBlitInfo);
context.Dependency(interMipDependency);
commandBuffer.blitImage2(&mipBlitInfo);
commandBuffer.pipelineBarrier2(&interMipDependency);
prevMipHeight = currentMipHeight;
prevMipWidth = currentMipWidth;
}
context.Dependency(imageReadyDependency);
commandBuffer.pipelineBarrier2(&imageReadyDependency);
#if !defined(ASTER_NDEBUG)
context.EndDebugRegion();
commandBuffer.endDebugUtilsLabelEXT();
#endif
}
systems::ResId<TextureView>
AssetLoader::LoadImageToGpu(systems::TransferContext &context, tinygltf::Image *image, bool isSrgb, cstr name) const
TextureHandle
AssetLoader::LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image, bool isSrgb) const
{
// TODO(Something not loading properly).
assert(image->component == 4);
assert(image->height > 0 && image->width > 0);
#if !defined(ASTER_NDEBUG)
auto assignedName = name ? name : image->name.empty() ? image->uri.c_str() : image->name.c_str();
#else
auto assignedName = nullptr;
#endif
u32 height = static_cast<u32>(image->height);
u32 width = static_cast<u32>(image->width);
u32 height = Cast<u32>(image->height);
u32 width = Cast<u32>(image->width);
vk::Format imageFormat = isSrgb ? vk::Format::eR8G8B8A8Srgb : vk::Format::eR8G8B8A8Unorm;
auto texture = m_Device->CreateTexture2D<Texture>({
.m_Format = imageFormat,
.m_Extent = {width, height},
.m_Name = assignedName,
.m_IsSampled = true,
.m_IsMipMapped = true,
.m_IsStorage = false,
});
Texture texture;
usize byteSize = image->image.size();
texture.Init(m_ResourceManager->m_Device, {.width = width, .height = height}, imageFormat, true,
image->name.data());
stagingBuffer->Init(m_ResourceManager->m_Device, byteSize);
stagingBuffer->Write(m_ResourceManager->m_Device, 0, byteSize, image->image.data());
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += assignedName;
context.BeginDebugRegion(loadActionName.c_str());
loadActionName += image->name.empty() ? "<texture>" : image->name.c_str();
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
#pragma region Barriers and Blits
@ -373,7 +402,7 @@ AssetLoader::LoadImageToGpu(systems::TransferContext &context, tinygltf::Image *
.newLayout = vk::ImageLayout::eTransferDstOptimal,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = texture->m_Image,
.image = texture.m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@ -392,14 +421,12 @@ AssetLoader::LoadImageToGpu(systems::TransferContext &context, tinygltf::Image *
vk::ImageMemoryBarrier2 postStagingBarrier = {
.srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
.dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
.dstAccessMask = vk::AccessFlagBits2::eTransferRead,
.oldLayout = vk::ImageLayout::eTransferDstOptimal,
.newLayout = vk::ImageLayout::eTransferSrcOptimal,
.srcQueueFamilyIndex = m_Device->m_TransferQueueFamily,
.dstQueueFamilyIndex = m_Device->m_PrimaryQueueFamily,
.image = texture->m_Image,
.srcQueueFamilyIndex = m_TransferQueueIndex,
.dstQueueFamilyIndex = m_GraphicsQueueIndex,
.image = texture.m_Image,
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
@ -409,28 +436,49 @@ AssetLoader::LoadImageToGpu(systems::TransferContext &context, tinygltf::Image *
.layerCount = 1,
},
};
;
vk::DependencyInfo postStagingDependency = {
.imageMemoryBarrierCount = 1,
.pImageMemoryBarriers = &postStagingBarrier,
};
vk::BufferImageCopy2 imageCopy = {
.bufferOffset = 0,
.bufferRowLength = Cast<u32>(image->width),
.bufferImageHeight = Cast<u32>(image->height),
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = texture.m_Extent,
};
vk::CopyBufferToImageInfo2 stagingCopyInfo = {
.srcBuffer = stagingBuffer->m_Buffer,
.dstImage = texture.m_Image,
.dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
.regionCount = 1,
.pRegions = &imageCopy,
};
#pragma endregion
context.Dependency(imageStartDependency);
context.UploadTexture(texture, {image->image.data(), image->image.size()});
context.Dependency(postStagingDependency);
m_CommandBuffer.pipelineBarrier2(&imageStartDependency);
m_CommandBuffer.copyBufferToImage2(&stagingCopyInfo);
m_CommandBuffer.pipelineBarrier2(&postStagingDependency);
GenerateMipMaps(context, texture, vk::ImageLayout::eTransferSrcOptimal, vk::ImageLayout::eShaderReadOnlyOptimal);
GenerateMipMaps(m_CommandBuffer, &texture, vk::ImageLayout::eTransferSrcOptimal,
vk::ImageLayout::eShaderReadOnlyOptimal);
#if !defined(ASTER_NDEBUG)
context.EndDebugRegion();
m_CommandBuffer.endDebugUtilsLabelEXT();
#endif
auto textureView = m_Device->CreateView<TextureView>(
{.m_Image = texture, .m_Name = image->name.data(), .m_AspectMask = vk::ImageAspectFlagBits::eColor});
return m_Device->m_CommitManager->CommitTexture(textureView);
return m_ResourceManager->CommitTexture(&texture);
}
Model
@ -440,8 +488,10 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
tinygltf::Model model;
tinygltf::TinyGLTF loader;
auto const fsPath = fs::absolute(path);
auto const ext = fsPath.extension();
const Device *pDevice = m_ResourceManager->m_Device;
const auto fsPath = fs::absolute(path);
const auto ext = fsPath.extension();
if (ext == GLTF_ASCII_FILE_EXTENSION)
{
std::string err;
@ -463,36 +513,42 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
}
auto context = m_Device->CreateTransferContext();
context.Begin();
AbortIfFailed(m_CommandBuffer.begin(&OneTimeCmdBeginInfo));
#if !defined(ASTER_NDEBUG)
StackString<128> loadActionName = "Load: ";
loadActionName += name ? name : path;
context.BeginDebugRegion(loadActionName.c_str());
vk::DebugUtilsLabelEXT debugLabel = {
.pLabelName = loadActionName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
m_CommandBuffer.beginDebugUtilsLabelEXT(&debugLabel);
#endif
eastl::hash_map<i32, systems::ResId<TextureView>> textureHandleMap;
eastl::vector<StagingBuffer> stagingBuffers;
eastl::hash_map<i32, TextureHandle> textureHandleMap;
eastl::vector<Material> materials;
Ref<Buffer> materialsBuffer;
StorageBuffer materialsBuffer;
BufferHandle materialsHandle;
if (!model.materials.empty())
{
// TODO("Something broken on load here.");
auto getTextureHandle = [this, &context, &textureHandleMap,
&model](i32 index, bool const isSrgb) -> systems::ResId<TextureView> {
auto getTextureHandle = [this, &textureHandleMap, &stagingBuffers, &model](i32 index,
bool isSrgb) -> TextureHandle {
if (index < 0)
{
return systems::NullId{};
return {};
}
if (auto const iter = textureHandleMap.find(index); iter != textureHandleMap.end())
const auto iter = textureHandleMap.find(index);
if (iter != textureHandleMap.end())
{
return iter->second;
}
auto const &texture = model.textures[index];
auto *image = &model.images[texture.source];
auto handle = LoadImageToGpu(context, image, isSrgb, texture.name.empty() ? nullptr : texture.name.c_str());
auto *image = &model.images[index];
TextureHandle handle = LoadImageToGpu(&stagingBuffers.push_back(), image, isSrgb);
textureHandleMap.emplace(index, handle);
return handle;
};
@ -502,22 +558,28 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
{
materials.push_back({
.m_AlbedoFactor = VectorToVec4(material.pbrMetallicRoughness.baseColorFactor),
.m_EmissionFactor = VectorToVec4(material.emissiveFactor, 0.0f),
.m_EmissionFactor = VectorToVec3(material.emissiveFactor),
.m_MetalFactor = Cast<f32>(material.pbrMetallicRoughness.metallicFactor),
.m_RoughFactor = Cast<f32>(material.pbrMetallicRoughness.roughnessFactor),
.m_AlbedoTex = getTextureHandle(material.pbrMetallicRoughness.baseColorTexture.index, true),
.m_NormalTex = getTextureHandle(material.normalTexture.index, false),
.m_MetalRoughTex =
getTextureHandle(material.pbrMetallicRoughness.metallicRoughnessTexture.index, false),
.m_OcclusionTex = getTextureHandle(material.occlusionTexture.index, false),
.m_EmissionTex = getTextureHandle(material.emissiveTexture.index, true),
.m_MetalFactor = static_cast<f32>(material.pbrMetallicRoughness.metallicFactor),
.m_RoughFactor = static_cast<f32>(material.pbrMetallicRoughness.roughnessFactor),
});
}
usize materialsByteSize = materials.size() * sizeof materials[0];
materialsBuffer = m_Device->CreateStorageBuffer(materialsByteSize, name);
materialsBuffer.Init(pDevice, materialsByteSize, false, name);
materialsHandle = m_ResourceManager->Commit(&materialsBuffer);
context.UploadBuffer(materialsBuffer, materials);
StagingBuffer &materialStaging = stagingBuffers.push_back();
materialStaging.Init(pDevice, materialsByteSize);
materialStaging.Write(pDevice, 0, materialsByteSize, materials.data());
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = materialsByteSize};
m_CommandBuffer.copyBuffer(materialStaging.m_Buffer, materialsBuffer.m_Buffer, 1, &bufferCopy);
}
// TODO: Mesh reordering based on nodes AND OR meshoptimizer
@ -556,17 +618,17 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
tinygltf::Buffer *posBuffer = &model.buffers[posBufferView->buffer];
usize byteOffset = (posAccessor->byteOffset + posBufferView->byteOffset);
vertexCount = static_cast<u32>(posAccessor->count);
vertexCount = Cast<u32>(posAccessor->count);
vertexPositions.reserve(vertexOffset + vertexCount);
if (posAccessor->type == TINYGLTF_TYPE_VEC4)
{
auto data = reinterpret_cast<vec4 *>(posBuffer->data.data() + byteOffset);
vec4 *data = Recast<vec4 *>(posBuffer->data.data() + byteOffset);
vertexPositions.insert(vertexPositions.end(), data, data + vertexCount);
}
else if (posAccessor->type == TINYGLTF_TYPE_VEC3)
{
auto data = reinterpret_cast<vec3 *>(posBuffer->data.data() + byteOffset);
vec3 *data = Recast<vec3 *>(posBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
vertexPositions.push_back(vec4(data[i], 1.0f));
@ -574,7 +636,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
else if (posAccessor->type == TINYGLTF_TYPE_VEC2)
{
auto data = reinterpret_cast<vec2 *>(posBuffer->data.data() + byteOffset);
vec2 *data = Recast<vec2 *>(posBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
vertexPositions.push_back(vec4(data[i], 0.0f, 1.0f));
@ -598,7 +660,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
if (normAccessor->type == TINYGLTF_TYPE_VEC4)
{
auto data = reinterpret_cast<vec4 *>(normBuffer->data.data() + byteOffset);
vec4 *data = Recast<vec4 *>(normBuffer->data.data() + byteOffset);
vec4 *end = data + vertexCount;
u32 idx = vertexOffset;
@ -610,7 +672,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
else if (normAccessor->type == TINYGLTF_TYPE_VEC3)
{
auto data = reinterpret_cast<vec3 *>(normBuffer->data.data() + byteOffset);
vec3 *data = Recast<vec3 *>(normBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto norm = vec4(data[i], 0.0f);
@ -619,7 +681,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
else if (normAccessor->type == TINYGLTF_TYPE_VEC2)
{
auto data = reinterpret_cast<vec2 *>(normBuffer->data.data() + byteOffset);
vec2 *data = Recast<vec2 *>(normBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto norm = vec4(data[i], 0.0f, 0.0f);
@ -642,7 +704,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
assert(uvAccessor->type == TINYGLTF_TYPE_VEC2 &&
uvAccessor->componentType == TINYGLTF_COMPONENT_TYPE_FLOAT);
{
auto data = reinterpret_cast<vec2 *>(uvBuffer->data.data() + byteOffset);
vec2 *data = Recast<vec2 *>(uvBuffer->data.data() + byteOffset);
vec2 *end = data + vertexCount;
u32 idx = vertexOffset;
vec2 *it = data;
@ -665,7 +727,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
if (colorAccessor->type == TINYGLTF_TYPE_VEC4)
{
auto data = reinterpret_cast<vec4 *>(colorBuffer->data.data() + byteOffset);
vec4 *data = Recast<vec4 *>(colorBuffer->data.data() + byteOffset);
vec4 *end = data + vertexCount;
u32 idx = vertexOffset;
@ -677,7 +739,7 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
else if (colorAccessor->type == TINYGLTF_TYPE_VEC3)
{
auto data = reinterpret_cast<vec3 *>(colorBuffer->data.data() + byteOffset);
vec3 *data = Recast<vec3 *>(colorBuffer->data.data() + byteOffset);
for (u32 i = 0; i < vertexCount; ++i)
{
auto color = vec4(data[i], 1.0f);
@ -698,22 +760,22 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
tinygltf::Buffer *indexBuffer = &model.buffers[indexBufferView->buffer];
usize byteOffset = (indexAccessor->byteOffset + indexBufferView->byteOffset);
indexCount = static_cast<u32>(indexAccessor->count);
indexCount = Cast<u32>(indexAccessor->count);
indices.reserve(indexOffset + indexCount);
if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT)
{
auto data = reinterpret_cast<u32 *>(indexBuffer->data.data() + byteOffset);
u32 *data = Recast<u32 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT)
{
auto data = reinterpret_cast<u16 *>(indexBuffer->data.data() + byteOffset);
u16 *data = Recast<u16 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
else if (indexAccessor->componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE)
{
auto data = reinterpret_cast<u8 *>(indexBuffer->data.data() + byteOffset);
u8 *data = Recast<u8 *>(indexBuffer->data.data() + byteOffset);
indices.insert(indices.end(), data, data + indexCount);
}
}
@ -748,12 +810,12 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
{
eastl::function<void(i32, i32)> processNode = [&processNode, &model, &nodes, &meshPrimRanges,
&meshPrimitives](i32 idx, i32 parent) -> void {
auto const *node = &model.nodes[idx];
const auto *node = &model.nodes[idx];
auto nodeTranslation = vec3{0.0f};
auto nodeRotation = quat{1.0f, 0.0f, 0.0f, 0.0f};
auto nodeScale = vec3{1.0f};
auto nodeMatrix = mat4{1.0f};
vec3 nodeTranslation = vec3{0.0f};
quat nodeRotation = quat{1.0f, 0.0f, 0.0f, 0.0f};
vec3 nodeScale = vec3{1.0f};
mat4 nodeMatrix = mat4{1.0f};
if (node->translation.size() == 3)
{
nodeTranslation = glm::make_vec3(node->translation.data());
@ -771,21 +833,21 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
{
nodeMatrix = glm::make_mat4(node->matrix.data());
}
mat4 const transform = translate(mat4(1.0f), nodeTranslation) * mat4_cast(nodeRotation) *
const mat4 transform = translate(mat4(1.0f), nodeTranslation) * mat4_cast(nodeRotation) *
scale(mat4(1.0f), nodeScale) * nodeMatrix;
i32 const nodeArrayIndex = static_cast<i32>(nodes.Add(transform, parent));
const i32 nodeArrayIndex = Cast<i32>(nodes.Add(transform, parent));
if (node->mesh >= 0)
{
auto [start, count] = meshPrimRanges[node->mesh];
auto const end = start + count;
const auto end = start + count;
for (usize i = start; i != end; ++i)
{
meshPrimitives[i].m_TransformIdx = nodeArrayIndex;
}
}
for (i32 const child : node->children)
for (const i32 child : node->children)
{
processNode(child, nodeArrayIndex);
}
@ -800,46 +862,76 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
nodes.Update();
auto nodeBuffer = m_Device->CreateStorageBuffer(nodes.GetGlobalTransformByteSize());
nodeBuffer->Write(0, nodes.GetGlobalTransformByteSize(), nodes.GetGlobalTransformPtr());
StorageBuffer nodeBuffer;
nodeBuffer.Init(pDevice, nodes.GetGlobalTransformByteSize(), true);
nodeBuffer.Write(pDevice, 0, nodes.GetGlobalTransformByteSize(), nodes.GetGlobalTransformPtr());
BufferHandle nodeHandle = m_ResourceManager->Commit(&nodeBuffer);
#pragma region Staging / Transfer / Uploads
systems::ResId<Buffer> positionBufferHandle = systems::ResId<Buffer>::Null();
systems::ResId<Buffer> vertexDataHandle = systems::ResId<Buffer>::Null();
Ref<IndexBuffer> indexBuffer;
BufferHandle positionBufferHandle;
BufferHandle vertexDataHandle;
IndexBuffer indexBuffer;
auto positionBuffer = m_Device->CreateStorageBuffer(vertexPositions.size() * sizeof vertexPositions[0]);
context.UploadBuffer(positionBuffer, vertexPositions);
{
auto uploadBufferData = [cmd = this->m_CommandBuffer, &stagingBuffers, pDevice](const Buffer *buffer,
const void *data) {
vk::BufferCopy bufferCopy = {.srcOffset = 0, .dstOffset = 0, .size = buffer->GetSize()};
StagingBuffer &stagingBuffer = stagingBuffers.push_back();
stagingBuffer.Init(pDevice, bufferCopy.size);
stagingBuffer.Write(pDevice, 0, bufferCopy.size, data);
cmd.copyBuffer(stagingBuffer.m_Buffer, buffer->m_Buffer, 1, &bufferCopy);
};
auto vertexDataBuffer = m_Device->CreateStorageBuffer(vertexData.size() * sizeof vertexData[0]);
context.UploadBuffer(vertexDataBuffer, vertexData);
StorageBuffer positionBuffer;
positionBuffer.Init(pDevice, vertexPositions.size() * sizeof vertexPositions[0], false);
positionBufferHandle = m_ResourceManager->Commit(&positionBuffer);
uploadBufferData(&positionBuffer, vertexPositions.data());
// TODO: Index buffer needs to be separated.
indexBuffer = systems::CastBuffer<IndexBuffer>(
m_Device->CreateIndexBuffer(indices.size() * sizeof indices[0], "Index Buffer"));
context.UploadBuffer(indexBuffer, indices);
StorageBuffer vertexDataBuffer;
vertexDataBuffer.Init(pDevice, vertexData.size() * sizeof vertexData[0], false);
vertexDataHandle = m_ResourceManager->Commit(&vertexDataBuffer);
uploadBufferData(&vertexDataBuffer, vertexData.data());
indexBuffer.Init(pDevice, indices.size() * sizeof indices[0]);
uploadBufferData(&indexBuffer, indices.data());
}
#pragma endregion
#if !defined(ASTER_NDEBUG)
context.EndDebugRegion();
m_CommandBuffer.endDebugUtilsLabelEXT();
#endif
context.End();
AbortIfFailed(m_CommandBuffer.end());
auto rcpt = m_Device->Submit(context);
m_Device->WaitOn(rcpt);
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &m_CommandBuffer,
};
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(m_TransferQueue.submit(1, &submitInfo, fence));
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u32>));
pDevice->m_Device.destroy(fence, nullptr);
AbortIfFailed(pDevice->m_Device.resetCommandPool(m_CommandPool, {}));
for (auto &buffer : stagingBuffers)
{
buffer.Destroy(pDevice);
}
Model::ModelHandles handles = {
.m_VertexPositionHandle = positionBuffer,
.m_VertexDataHandle = vertexDataBuffer,
.m_MaterialsHandle = materialsBuffer,
.m_NodeHandle = nodeBuffer,
.m_VertexPositionHandle = positionBufferHandle,
.m_VertexDataHandle = vertexDataHandle,
.m_MaterialsHandle = materialsHandle,
.m_NodeHandle = nodeHandle,
};
Model::ModelHandlesData handlesData = handles;
auto handlesBuffer = m_Device->CreateStorageBuffer(sizeof handlesData, "Materials");
handlesBuffer->Write(0, sizeof handlesData, &handlesData);
eastl::vector<systems::ResId<TextureView>> textureHandles;
eastl::vector<TextureHandle> textureHandles;
textureHandles.reserve(textureHandleMap.size());
for (auto &[key, val] : textureHandleMap)
@ -848,45 +940,139 @@ AssetLoader::LoadModelToGpu(cstr path, cstr name)
}
return Model{
textureHandles, std::move(nodes), nodeBuffer, handles, handlesBuffer, indexBuffer, meshPrimitives,
m_ResourceManager, std::move(textureHandles), std::move(nodes), handles, indexBuffer, meshPrimitives,
};
}
Model::Model(eastl::vector<systems::ResId<TextureView>> &textureHandles, Nodes &&nodes, Ref<Buffer> nodeBuffer,
ModelHandles &handles, Ref<Buffer> modelHandlesBuffer, Ref<IndexBuffer> indexBuffer,
eastl::vector<MeshPrimitive> const &meshPrimitives)
: m_TextureHandles(std::move(textureHandles))
Model::Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles, Nodes &&nodes,
const ModelHandles &handles, const IndexBuffer &indexBuffer,
const eastl::vector<MeshPrimitive> &meshPrimitives)
: m_ResourceManager(resourceManager)
, m_TextureHandles(std::move(textureHandles))
, m_Nodes(std::move(nodes))
, m_Handles(std::move(handles))
, m_NodeBuffer(std::move(nodeBuffer))
, m_IndexBuffer(std::move(indexBuffer))
, m_ModelHandlesBuffer(std::move(modelHandlesBuffer))
, m_Handles(handles)
, m_IndexBuffer(indexBuffer)
, m_MeshPrimitives(meshPrimitives)
{
}
mat4 const &
Model::Model(Model &&other) noexcept
: m_ResourceManager(Take(other.m_ResourceManager))
, m_TextureHandles(std::move(other.m_TextureHandles))
, m_Handles(other.m_Handles)
, m_IndexBuffer(other.m_IndexBuffer)
, m_MeshPrimitives(std::move(other.m_MeshPrimitives))
{
}
Model &
Model::operator=(Model &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = Take(other.m_ResourceManager);
m_TextureHandles = std::move(other.m_TextureHandles);
m_Handles = other.m_Handles;
m_IndexBuffer = other.m_IndexBuffer;
m_MeshPrimitives = std::move(other.m_MeshPrimitives);
return *this;
}
const mat4 &
Model::GetModelTransform() const
{
return m_Nodes[0];
}
void
Model::SetModelTransform(mat4 const &transform)
Model::SetModelTransform(const mat4 &transform)
{
m_Nodes.Set(0, transform);
}
Model::~Model()
{
if (!m_ResourceManager)
return;
m_IndexBuffer.Destroy(m_ResourceManager->m_Device);
m_ResourceManager->Release(m_Handles.m_VertexDataHandle);
m_ResourceManager->Release(m_Handles.m_NodeHandle);
m_ResourceManager->Release(m_Handles.m_VertexPositionHandle);
m_ResourceManager->Release(m_Handles.m_MaterialsHandle);
for (const TextureHandle &handle : m_TextureHandles)
{
m_ResourceManager->Release(handle);
}
}
void
Model::Update()
{
if (m_Nodes.Update())
{
m_NodeBuffer->Write(0, m_Nodes.GetGlobalTransformByteSize(), m_Nodes.GetGlobalTransformPtr());
m_ResourceManager->Write(m_Handles.m_NodeHandle, 0, m_Nodes.GetGlobalTransformByteSize(),
m_Nodes.GetGlobalTransformPtr());
}
}
AssetLoader::AssetLoader(systems::RenderingDevice &device)
: m_Device{&device}
AssetLoader::AssetLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
u32 graphicsQueueIndex)
: m_ResourceManager(resourceManager)
, m_TransferQueue(transferQueue)
, m_TransferQueueIndex(transferQueueIndex)
, m_GraphicsQueueIndex(graphicsQueueIndex)
{
const Device *pDevice = resourceManager->m_Device;
const vk::CommandPoolCreateInfo poolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = transferQueueIndex,
};
AbortIfFailedM(pDevice->m_Device.createCommandPool(&poolCreateInfo, nullptr, &m_CommandPool),
"Transfer command pool creation failed.");
pDevice->SetName(m_CommandPool, "Asset Loader Command Pool");
const vk::CommandBufferAllocateInfo commandBufferAllocateInfo = {
.commandPool = m_CommandPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailed(pDevice->m_Device.allocateCommandBuffers(&commandBufferAllocateInfo, &m_CommandBuffer));
pDevice->SetName(m_CommandBuffer, "Asset Loader Command Buffer");
}
AssetLoader::~AssetLoader()
{
if (m_ResourceManager)
{
m_ResourceManager->m_Device->m_Device.destroy(m_CommandPool, nullptr);
}
}
AssetLoader::AssetLoader(AssetLoader &&other) noexcept
: m_ResourceManager(Take(other.m_ResourceManager))
, m_CommandPool(other.m_CommandPool)
, m_CommandBuffer(other.m_CommandBuffer)
, m_TransferQueue(other.m_TransferQueue)
, m_TransferQueueIndex(other.m_TransferQueueIndex)
, m_GraphicsQueueIndex(other.m_GraphicsQueueIndex)
{
}
AssetLoader &
AssetLoader::operator=(AssetLoader &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = Take(other.m_ResourceManager);
m_CommandPool = other.m_CommandPool;
m_CommandBuffer = other.m_CommandBuffer;
m_TransferQueue = other.m_TransferQueue;
m_TransferQueueIndex = other.m_TransferQueueIndex;
m_GraphicsQueueIndex = other.m_GraphicsQueueIndex;
return *this;
}

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: asset_loader.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -9,24 +9,8 @@
#include "aster/core/buffer.h"
#include "aster/systems/resource.h"
#include "gpu_resource_manager.h"
#include "nodes.h"
#include "tiny_gltf.h"
namespace systems
{
class TransferContext;
}
namespace systems
{
class RenderingDevice;
class ResourceManager;
class SamplerManager;
class BufferManager;
class ImageManager;
class CommitManager;
} // namespace systems
namespace tinygltf
{
@ -34,6 +18,7 @@ struct Image;
}
struct Image;
struct TextureHandle;
struct Texture;
constexpr auto GLTF_ASCII_FILE_EXTENSION = ".gltf";
@ -51,14 +36,14 @@ struct MeshPrimitive
struct Material
{
vec4 m_AlbedoFactor; // 16 16
vec4 m_EmissionFactor; // 16 32
systems::ResId<TextureView> m_AlbedoTex; // 08 40
systems::ResId<TextureView> m_NormalTex; // 08 48
systems::ResId<TextureView> m_MetalRoughTex; // 08 56
systems::ResId<TextureView> m_OcclusionTex; // 08 64
systems::ResId<TextureView> m_EmissionTex; // 08 72
f32 m_MetalFactor; // 04 76
f32 m_RoughFactor; // 04 80
vec3 m_EmissionFactor; // 12 28
f32 m_MetalFactor; // 04 32
f32 m_RoughFactor; // 04 36
TextureHandle m_AlbedoTex; // 04 40
TextureHandle m_NormalTex; // 04 44
TextureHandle m_MetalRoughTex; // 04 48
TextureHandle m_OcclusionTex; // 04 52
TextureHandle m_EmissionTex; // 04 56
};
struct VertexData
@ -71,61 +56,49 @@ struct VertexData
struct Model
{
eastl::vector<systems::ResId<TextureView>> m_TextureHandles;
Nodes m_Nodes;
GpuResourceManager *m_ResourceManager;
struct ModelHandlesData
{
uptr m_VertexPositionHandle;
uptr m_VertexDataHandle;
uptr m_MaterialsHandle;
uptr m_NodeHandle;
};
eastl::vector<TextureHandle> m_TextureHandles;
Nodes m_Nodes;
struct ModelHandles
{
Ref<Buffer> m_VertexPositionHandle;
Ref<Buffer> m_VertexDataHandle;
Ref<Buffer> m_MaterialsHandle;
Ref<Buffer> m_NodeHandle;
operator ModelHandlesData() const
{
return {
.m_VertexPositionHandle = m_VertexPositionHandle->GetDeviceAddress(),
.m_VertexDataHandle = m_VertexDataHandle->GetDeviceAddress(),
.m_MaterialsHandle = m_MaterialsHandle->GetDeviceAddress(),
.m_NodeHandle = m_NodeHandle->GetDeviceAddress(),
};
}
BufferHandle m_VertexPositionHandle;
BufferHandle m_VertexDataHandle;
BufferHandle m_MaterialsHandle;
BufferHandle m_NodeHandle;
} m_Handles;
Ref<Buffer> m_NodeBuffer;
Ref<IndexBuffer> m_IndexBuffer;
Ref<Buffer> m_ModelHandlesBuffer;
IndexBuffer m_IndexBuffer;
eastl::vector<MeshPrimitive> m_MeshPrimitives;
[[nodiscard]] mat4 const &GetModelTransform() const;
void SetModelTransform(mat4 const &transform);
[[nodiscard]] const mat4 &GetModelTransform() const;
void SetModelTransform(const mat4 &transform);
void Update();
Model(eastl::vector<systems::ResId<TextureView>> &textureHandles, Nodes &&nodes, Ref<Buffer> nodeBuffer,
ModelHandles &handles, Ref<Buffer> modelHandlesBuffer, Ref<IndexBuffer> indexBuffer,
eastl::vector<MeshPrimitive> const &meshPrimitives);
~Model() = default;
Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles, Nodes &&nodes,
const ModelHandles &handles, const IndexBuffer &indexBuffer,
const eastl::vector<MeshPrimitive> &meshPrimitives);
~Model();
Model(Model &&other) noexcept = default;
Model &operator=(Model &&other) noexcept = default;
Model(Model &&other) noexcept;
Model &operator=(Model &&other) noexcept;
Model(Model const &) = delete;
Model const &operator=(Model const &) = delete;
Model(const Model &) = delete;
const Model &operator=(const Model &) = delete;
};
struct AssetLoader
{
systems::RenderingDevice *m_Device;
GpuResourceManager *m_ResourceManager;
vk::CommandPool m_CommandPool;
vk::CommandBuffer m_CommandBuffer;
vk::Queue m_TransferQueue;
u32 m_TransferQueueIndex;
u32 m_GraphicsQueueIndex;
Ref<TextureView> LoadHdrImage(cstr path, cstr name = nullptr) const;
void LoadHdrImage(Texture *texture, cstr path, cstr name = nullptr) const;
TextureHandle LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image, bool isSrgb) const;
Model LoadModelToGpu(cstr path, cstr name = nullptr);
constexpr static auto ANormal = "NORMAL";
@ -137,32 +110,17 @@ struct AssetLoader
constexpr static auto AJoints0 = "JOINTS_0";
constexpr static auto AWeights0 = "WEIGHTS_0";
explicit AssetLoader(systems::RenderingDevice &device);
AssetLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
u32 graphicsQueueIndex);
~AssetLoader();
private:
systems::ResId<TextureView>
LoadImageToGpu(systems::TransferContext &context, tinygltf::Image *image, bool isSrgb, cstr name = nullptr) const;
AssetLoader(AssetLoader &&other) noexcept;
AssetLoader &operator=(AssetLoader &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(AssetLoader);
};
void
GenerateMipMaps(systems::TransferContext &context, Ref<Texture> const &textureView, vk::ImageLayout initialLayout,
vk::ImageLayout finalLayout, vk::PipelineStageFlags2 prevStage, vk::PipelineStageFlags2 finalStage);
void
GenerateMipMaps(systems::TransferContext &context, concepts::ImageRefTo<Texture> auto &texture,
vk::ImageLayout initialLayout, vk::ImageLayout finalLayout,
void GenerateMipMaps(vk::CommandBuffer commandBuffer, Texture *texture, vk::ImageLayout initialLayout,
vk::ImageLayout finalLayout,
vk::PipelineStageFlags2 prevStage = vk::PipelineStageFlagBits2::eAllCommands,
vk::PipelineStageFlags2 finalStage = vk::PipelineStageFlagBits2::eAllCommands)
{
GenerateMipMaps(context, systems::CastImage<Texture>(texture), initialLayout, finalLayout, prevStage, finalStage);
}
void
GenerateMipMaps(systems::TransferContext &context, concepts::ViewRefTo<Texture> auto &texture,
vk::ImageLayout initialLayout, vk::ImageLayout finalLayout,
vk::PipelineStageFlags2 prevStage = vk::PipelineStageFlagBits2::eAllCommands,
vk::PipelineStageFlags2 finalStage = vk::PipelineStageFlagBits2::eAllCommands)
{
GenerateMipMaps(context, systems::CastImage<Texture>(texture->m_Image), initialLayout, finalLayout, prevStage,
finalStage);
}
vk::PipelineStageFlags2 finalStage = vk::PipelineStageFlagBits2::eAllCommands);

View File

@ -0,0 +1,688 @@
// =============================================
// Aster: gpu_resource_manager.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "gpu_resource_manager.h"
#include "helpers.h"
#include "aster/core/buffer.h"
#include "aster/core/device.h"
#include "aster/core/image.h"
#include <EASTL/array.h>
// Prepares the texture pool: caps growth at `maxCapacity` slots and marks
// the intrusive free-list as empty.
void
TextureManager::Init(const u32 maxCapacity)
{
    m_FreeHead = GpuResourceHandle::INVALID_HANDLE; // empty free-list
    m_MaxCapacity = maxCapacity;
}
// Copies `texture` into a pooled slot and takes over ownership of the GPU
// resource; the caller's copy is demoted to a non-owning view by clearing its
// OWNED_BIT. Slots are recycled through an intrusive free-list: a freed slot's
// first 4 bytes hold the index of the next free slot. Aborts when the texture
// is invalid or the pool is exhausted.
TextureHandle
TextureManager::Commit(Texture *texture)
{
    ERROR_IF(!texture || !texture->IsValid(), "Texture must be valid for committal")
    THEN_ABORT(-1);
    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
    {
        // Reuse a previously released slot; pop it off the free-list.
        const u32 index = m_FreeHead;
        Texture *allocatedTexture = &m_Textures[index];
        assert(!allocatedTexture->IsValid());
        m_FreeHead = *Recast<u32 *>(allocatedTexture);
        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<Texture>);
        *allocatedTexture = *texture;
        // Take ownership of the texture.
        texture->m_Flags_ &= ~Texture::OWNED_BIT;
        return {index};
    }
    // No free slot: append a new one while under capacity.
    const u32 index = Cast<u32>(m_Textures.size());
    if (index < m_MaxCapacity)
    {
        Texture *allocatedTexture = &m_Textures.push_back();
        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<Texture>);
        *allocatedTexture = *texture;
        texture->m_Flags_ &= ~Texture::OWNED_BIT;
        return {index};
    }
    // Fixed: previously reported "Out of Buffers" (copy-paste from BufferManager).
    ERROR("Out of Textures") THEN_ABORT(-1);
}
// Translates a committed handle into a pointer at the pooled texture. The
// pointer is only stable until the pool vector reallocates (Commit may grow it).
Texture *
TextureManager::Fetch(const TextureHandle handle)
{
    assert(!handle.IsInvalid());
    Texture &slot = m_Textures[handle.m_Index];
    return &slot;
}
// Destroys the texture in `handle`'s slot and pushes the slot onto the
// intrusive free-list: the dead slot's first 4 bytes are reused to store the
// previous free-list head.
void
TextureManager::Release(const Device *device, const TextureHandle handle)
{
    assert(!handle.IsInvalid());
    Texture *allocatedTexture = &m_Textures[handle.m_Index];
    allocatedTexture->Destroy(device);
    assert(!allocatedTexture->IsValid());
    // Link the freed slot into the free-list and make it the new head.
    *Recast<u32 *>(allocatedTexture) = m_FreeHead;
    m_FreeHead = handle.m_Index;
}
// Tears down every texture remaining in the pool at shutdown.
// NOTE(review): this also visits Released slots whose bytes were punned into
// free-list links — presumably Texture::Destroy tolerates an already-invalid
// texture; confirm.
void
TextureManager::Destroy(const Device *device)
{
    for (auto &texture : m_Textures)
    {
        texture.Destroy(device);
    }
}
// Prepares the buffer pool: caps growth at `maxCapacity` slots and marks the
// intrusive free-list as empty.
void
BufferManager::Init(const u32 maxCapacity)
{
    m_FreeHead = GpuResourceHandle::INVALID_HANDLE; // empty free-list
    m_MaxCapacity = maxCapacity;
}
// Copies `buffer` into a pooled slot, taking over ownership of the GPU buffer;
// the caller's copy has OWNED_BIT (packed into m_Size_) cleared so it will not
// free the memory. Slots are recycled via an intrusive free-list stored in the
// first 4 bytes of freed entries. Aborts if the buffer is invalid/unowned or
// the pool is full.
BufferHandle
BufferManager::Commit(StorageBuffer *buffer)
{
    ERROR_IF(!buffer || !buffer->IsValid() || !buffer->IsOwned(), "Buffer must be valid and owned for commital")
    THEN_ABORT(-1);
    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
    {
        // Reuse a previously released slot; pop it off the free-list.
        const u32 index = m_FreeHead;
        StorageBuffer *allocatedBuffer = &m_Buffers[index];
        assert(!allocatedBuffer->IsValid());
        m_FreeHead = *Recast<u32 *>(allocatedBuffer);
        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<StorageBuffer>);
        *allocatedBuffer = *buffer;
        // Take ownership of the buffer.
        buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;
        return {index};
    }
    // No free slot: append a new one while under capacity.
    const u32 index = Cast<u32>(m_Buffers.size());
    if (index < m_MaxCapacity)
    {
        StorageBuffer *allocatedBuffer = &m_Buffers.push_back();
        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<StorageBuffer>);
        *allocatedBuffer = *buffer;
        buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;
        return {index};
    }
    ERROR("Out of Buffers") THEN_ABORT(-1);
}
// Translates a committed handle into a pointer at the pooled buffer. The
// pointer is only stable until the pool vector reallocates (Commit may grow it).
StorageBuffer *
BufferManager::Fetch(const BufferHandle handle)
{
    assert(!handle.IsInvalid());
    StorageBuffer &slot = m_Buffers[handle.m_Index];
    return &slot;
}
// Destroys the buffer in `handle`'s slot and pushes the slot onto the
// intrusive free-list: the dead slot's first 4 bytes are reused to store the
// previous free-list head.
void
BufferManager::Release(const Device *device, const BufferHandle handle)
{
    assert(!handle.IsInvalid());
    StorageBuffer *allocatedBuffer = &m_Buffers[handle.m_Index];
    allocatedBuffer->Destroy(device);
    assert(!allocatedBuffer->IsValid());
    // Link the freed slot into the free-list and make it the new head.
    *Recast<u32 *>(allocatedBuffer) = m_FreeHead;
    m_FreeHead = handle.m_Index;
}
void
BufferManager::Destroy(const Device *device)
{
for (auto &buffer : m_Buffers)
{
buffer.Destroy(device);
}
}
// Commits through the base TextureManager pool and rewraps the resulting
// index as a StorageTextureHandle.
StorageTextureHandle
StorageTextureManager::Commit(StorageTexture *texture)
{
    return {TextureManager::Commit(texture).m_Index};
}
// Fetches the pooled entry and reinterprets it as a StorageTexture.
// NOTE(review): the pool stores Texture by value, so this assumes
// StorageTexture adds no state beyond Texture — confirm.
StorageTexture *
StorageTextureManager::Fetch(const StorageTextureHandle handle)
{
    assert(!handle.IsInvalid());
    return Recast<StorageTexture *>(&m_Textures[handle.m_Index]);
}
void
StorageTextureManager::Release(const Device *device, const StorageTextureHandle handle)
{
TextureManager::Release(device, {handle.m_Index});
}
// Hashes every field of a vk::SamplerCreateInfo that affects sampler identity,
// so SamplerManager can deduplicate samplers. Float fields are quantized to
// integers before hashing (noted per line); the combine order is part of the
// hash identity and must not change.
usize
HashSamplerCreateInfo(const vk::SamplerCreateInfo *createInfo)
{
    usize hash = HashAny(createInfo->flags);
    hash = HashCombine(hash, HashAny(createInfo->magFilter));
    hash = HashCombine(hash, HashAny(createInfo->minFilter));
    hash = HashCombine(hash, HashAny(createInfo->mipmapMode));
    hash = HashCombine(hash, HashAny(createInfo->addressModeU));
    hash = HashCombine(hash, HashAny(createInfo->addressModeV));
    hash = HashCombine(hash, HashAny(createInfo->addressModeW));
    hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->mipLodBias * 1000))); // Resolution of 10^-3
    hash = HashCombine(hash, HashAny(createInfo->anisotropyEnable));
    hash = HashCombine(hash,
                       HashAny(Cast<usize>(createInfo->maxAnisotropy * 0x10))); // 16:1 Anisotropy is enough resolution
    hash = HashCombine(hash, HashAny(createInfo->compareEnable));
    hash = HashCombine(hash, HashAny(createInfo->compareOp));
    hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->minLod * 1000))); // 0.001 resolution is enough.
    hash = HashCombine(hash,
                       HashAny(Cast<usize>(createInfo->maxLod * 1000))); // 0.001 resolution is enough. (1 == NO Clamp)
    hash = HashCombine(hash, HashAny(createInfo->borderColor));
    hash = HashCombine(hash, HashAny(createInfo->unnormalizedCoordinates));
    return hash;
}
// Pre-reserves the parallel sampler/hash arrays so early Create() calls do not
// reallocate.
void
SamplerManager::Init(usize size)
{
    m_SamplerHashes.reserve(size);
    m_Samplers.reserve(size);
}
// Returns a handle to a sampler matching `createInfo`, creating a new
// vk::Sampler only when no existing sampler has the same create-info hash.
// Samplers are never individually destroyed, so handle indices stay stable.
SamplerHandle
SamplerManager::Create(const Device *device, const vk::SamplerCreateInfo *createInfo)
{
    const usize hash = HashSamplerCreateInfo(createInfo);
    // Linear scan over the hash array; the sampler count is expected to stay small.
    for (u32 index = 0; usize samplerHash : m_SamplerHashes)
    {
        if (samplerHash == hash)
        {
            return {index};
        }
        ++index;
    }
    // Cache miss: create and append to both parallel arrays.
    vk::Sampler sampler;
    AbortIfFailed(device->m_Device.createSampler(createInfo, nullptr, &sampler));
    const u32 index = Cast<u32>(m_SamplerHashes.size());
    m_SamplerHashes.push_back(hash);
    m_Samplers.push_back(sampler);
    return {index};
}
// Resolves a sampler handle to the cached vk::Sampler (returned by value;
// vk::Sampler is a plain Vulkan handle).
vk::Sampler
SamplerManager::Fetch(const SamplerHandle handle)
{
    assert(!handle.IsInvalid());
    const vk::Sampler sampler = m_Samplers[handle.m_Index];
    return sampler;
}
void
SamplerManager::Destroy(const Device *device)
{
for (const auto &sampler : m_Samplers)
{
device->m_Device.destroy(sampler, nullptr);
}
m_Samplers.clear();
m_SamplerHashes.clear();
}
// Constructs the union holding a buffer-descriptor write payload.
GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info)
    : uBufferInfo(info)
{
}
// Constructs the union holding an image-descriptor write payload.
GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info)
    : uImageInfo(info)
{
}
// Constructs the union holding a buffer-view write payload.
GpuResourceManager::WriteInfo::WriteInfo(vk::BufferView info)
    : uBufferView(info)
{
}
// Commits a storage buffer into the bindless pool (taking ownership) and
// queues a descriptor write for it; the write is applied on the next Update().
BufferHandle
GpuResourceManager::Commit(StorageBuffer *storageBuffer)
{
    const BufferHandle handle = m_BufferManager.Commit(storageBuffer);
    // m_WriteInfos is a deque, so &back() below stays valid as more writes
    // are queued before Update() runs.
    m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
        .buffer = storageBuffer->m_Buffer,
        .offset = 0,
        .range = storageBuffer->GetSize(),
    });
    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = BUFFER_BINDING_INDEX,
        .dstArrayElement = handle.m_Index, // bindless slot == pool index
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eStorageBuffer,
        .pBufferInfo = &m_WriteInfos.back().uBufferInfo,
    });
    // Remember the owner so Release() can cancel a still-pending write.
    m_WriteOwner.emplace_back(HandleType::eBuffer, handle.m_Index);
#if !defined(ASTER_NDEBUG)
    ++m_CommitedBufferCount;
#endif
    return handle;
}
void
GpuResourceManager::Write(const BufferHandle handle, const usize offset, const usize size, const void *data)
{
m_BufferManager.Fetch(handle)->Write(m_Device, offset, size, data);
}
// Cancels the (at most one) pending descriptor write owned by
// (handleType, handleIndex) using swap-with-last removal on the parallel
// m_Writes / m_WriteOwner arrays. The matching WriteInfo entry is left in
// m_WriteInfos until Update() clears it; nothing references it after removal.
void
GpuResourceManager::EraseWrites(u32 handleIndex, HandleType handleType)
{
    // Iterate the two arrays in lock-step so the matching write can be found
    // from its owner record.
    auto writeIter = m_Writes.begin();
    auto ownerIter = m_WriteOwner.begin();
    const auto ownerEnd = m_WriteOwner.end();
    while (ownerIter != ownerEnd)
    {
        if (ownerIter->first == handleType && ownerIter->second == handleIndex)
        {
            // Swap-remove: overwrite with the last entry, then pop.
            // (Self-assignment when erasing the last entry is harmless.)
            *writeIter = m_Writes.back();
            *ownerIter = m_WriteOwner.back();
            m_Writes.pop_back();
            m_WriteOwner.pop_back();
            return;
        }
        ++ownerIter;
        ++writeIter;
    }
}
// Releases a committed buffer: cancels any still-pending descriptor write for
// it and destroys the pooled buffer, recycling its slot. Invalid handles are
// a no-op.
void
GpuResourceManager::Release(BufferHandle handle)
{
    if (handle.IsInvalid())
        return;
    EraseWrites(handle.m_Index, HandleType::eBuffer);
    m_BufferManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
    --m_CommitedBufferCount;
#endif
}
// Release-and-return variant: hands ownership of the pooled buffer back to the
// caller through `storageBuffer` (which must be empty/invalid on entry). The
// internal slot is demoted to non-owning before Release(handle) recycles it —
// presumably Destroy() on a non-owning buffer only invalidates it without
// freeing the GPU memory; confirm.
void
GpuResourceManager::Release(StorageBuffer *storageBuffer, const BufferHandle handle)
{
    assert(storageBuffer);
    assert(!storageBuffer->IsValid());
    StorageBuffer *internal = m_BufferManager.Fetch(handle);
    *storageBuffer = *internal;
    // Strip ownership from the internal copy so recycling does not free the memory.
    internal->m_Size_ &= ~StorageBuffer::OWNED_BIT;
    Release(handle);
}
// Releases a committed texture: cancels any still-pending descriptor write for
// it and destroys the pooled texture, recycling its slot. Invalid handles are
// a no-op.
void
GpuResourceManager::Release(TextureHandle handle)
{
    if (handle.IsInvalid())
        return;
    EraseWrites(handle.m_Index, HandleType::eTexture);
    m_TextureManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
    --m_CommitedTextureCount;
#endif
}
// Release-and-return variant: hands ownership of the pooled texture back to
// the caller through `texture` (which must be empty/invalid on entry). The
// internal slot is demoted to non-owning before Release(handle) recycles it —
// presumably Destroy() on a non-owning texture only invalidates it without
// freeing the GPU resource; confirm.
void
GpuResourceManager::Release(Texture *texture, TextureHandle handle)
{
    assert(texture);
    assert(!texture->IsValid());
    Texture *internal = m_TextureManager.Fetch(handle);
    *texture = *internal;
    // Strip ownership from the internal copy so recycling does not free the resource.
    internal->m_Flags_ &= ~Texture::OWNED_BIT;
    Release(handle);
}
// Commits a sampled texture into the bindless pool (taking ownership) and
// queues a combined-image-sampler descriptor write for it, applied on the next
// Update(). An invalid `sampler` handle selects the manager's default sampler.
TextureHandle
GpuResourceManager::CommitTexture(Texture *texture, const SamplerHandle sampler)
{
    TextureHandle handle = m_TextureManager.Commit(texture);
    const vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler);
    // m_WriteInfos is a deque, so &back() below stays valid as more writes queue.
    m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
        .sampler = samplerImpl,
        .imageView = texture->m_View,
        .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
    });
    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = TEXTURE_BINDING_INDEX,
        .dstArrayElement = handle.m_Index, // bindless slot == pool index
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eCombinedImageSampler,
        .pImageInfo = &m_WriteInfos.back().uImageInfo,
    });
    // Remember the owner so Release() can cancel a still-pending write.
    m_WriteOwner.emplace_back(HandleType::eTexture, handle.m_Index);
#if !defined(ASTER_NDEBUG)
    ++m_CommitedTextureCount;
#endif
    return {handle};
}
// Commits a storage (read/write) texture into the bindless pool (taking
// ownership) and queues a storage-image descriptor write (layout eGeneral),
// applied on the next Update(). An invalid `sampler` handle selects the
// manager's default sampler.
StorageTextureHandle
GpuResourceManager::CommitStorageTexture(StorageTexture *storageTexture, SamplerHandle sampler)
{
    StorageTextureHandle handle = m_StorageTextureManager.Commit(storageTexture);
    vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler);
    // m_WriteInfos is a deque, so &back() below stays valid as more writes queue.
    m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
        .sampler = samplerImpl,
        .imageView = storageTexture->m_View,
        .imageLayout = vk::ImageLayout::eGeneral,
    });
    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = STORAGE_TEXTURE_BINDING_INDEX,
        .dstArrayElement = handle.m_Index, // bindless slot == pool index
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eStorageImage,
        .pImageInfo = &m_WriteInfos.back().uImageInfo,
    });
    // Remember the owner so Release() can cancel a still-pending write.
    m_WriteOwner.emplace_back(HandleType::eStorageTexture, handle.m_Index);
#if !defined(ASTER_NDEBUG)
    ++m_CommitedStorageTextureCount;
#endif
    return {handle};
}
// Releases a committed storage texture: cancels any still-pending descriptor
// write for it and destroys the pooled texture, recycling its slot. Invalid
// handles are a no-op.
void
GpuResourceManager::Release(StorageTextureHandle handle)
{
    if (handle.IsInvalid())
        return;
    // BUG FIX: was HandleType::eTexture. CommitStorageTexture records owners as
    // HandleType::eStorageTexture, so the pending write was never erased and an
    // unrelated sampled-texture write sharing the same index could be erased
    // instead.
    EraseWrites(handle.m_Index, HandleType::eStorageTexture);
    m_StorageTextureManager.Release(m_Device, handle);
#if !defined(ASTER_NDEBUG)
    --m_CommitedStorageTextureCount;
#endif
}
// Release-and-return variant: hands ownership of the pooled storage texture
// back to the caller through `texture` (which must be empty/invalid on entry).
// The internal slot is demoted to non-owning before Release(handle) recycles
// it — presumably Destroy() on a non-owning texture only invalidates it
// without freeing the GPU resource; confirm.
void
GpuResourceManager::Release(StorageTexture *texture, const StorageTextureHandle handle)
{
    assert(texture);
    assert(!texture->IsValid());
    StorageTexture *internal = m_StorageTextureManager.Fetch(handle);
    *texture = *internal;
    // Strip ownership from the internal copy so recycling does not free the resource.
    internal->m_Flags_ &= ~StorageTexture::OWNED_BIT;
    Release(handle);
}
// Flushes all queued descriptor writes to the bindless set in one
// updateDescriptorSets call, then drops the queued state.
void
GpuResourceManager::Update()
{
    const bool hasPendingWrites = !m_Writes.empty() && !m_WriteInfos.empty();
    if (!hasPendingWrites)
        return;
    m_Device->m_Device.updateDescriptorSets(Cast<u32>(m_Writes.size()), m_Writes.data(), 0, nullptr);
    m_WriteOwner.clear();
    m_WriteInfos.clear();
    m_Writes.clear();
}
// Builds the bindless descriptor infrastructure: sizes the resource pools from
// device limits, creates the default sampler, and allocates one
// update-after-bind descriptor set with three partially-bound bindings
// (storage buffers, combined image samplers, storage images).
GpuResourceManager::GpuResourceManager(Device *device, u16 maxSize)
    : m_Device(device)
{
    vk::PhysicalDeviceProperties properties;
    m_Device->m_PhysicalDevice.getProperties(&properties);
    // Leave 1024 descriptors of headroom below each per-stage device limit.
    // NOTE(review): assumes each limit exceeds 1024 — the subtraction would
    // wrap otherwise; confirm on minimum-spec devices.
    u32 buffersCount = eastl::min(properties.limits.maxPerStageDescriptorStorageBuffers - 1024, Cast<u32>(maxSize));
    u32 texturesCount = eastl::min(properties.limits.maxPerStageDescriptorSampledImages - 1024, Cast<u32>(maxSize));
    u32 storageTexturesCount =
        eastl::min(properties.limits.maxPerStageDescriptorStorageImages - 1024, Cast<u32>(maxSize));
    INFO("Max Buffer Count: {}", buffersCount);
    INFO("Max Texture Count: {}", texturesCount);
    INFO("Max Storage Texture Count: {}", storageTexturesCount);
    m_BufferManager.Init(buffersCount);
    m_TextureManager.Init(texturesCount);
    m_StorageTextureManager.Init(storageTexturesCount);
    // NOTE(review): sampler pool is sized with storageTexturesCount — looks
    // like a reused variable; samplers are deduplicated so this is only a
    // reserve hint, but confirm the intent.
    m_SamplerManager.Init(storageTexturesCount);
    // Trilinear, repeat-addressed, max-anisotropy sampler used whenever a
    // commit passes an invalid SamplerHandle.
    m_DefaultSamplerCreateInfo = {
        .magFilter = vk::Filter::eLinear,
        .minFilter = vk::Filter::eLinear,
        .mipmapMode = vk::SamplerMipmapMode::eLinear,
        .addressModeU = vk::SamplerAddressMode::eRepeat,
        .addressModeV = vk::SamplerAddressMode::eRepeat,
        .addressModeW = vk::SamplerAddressMode::eRepeat,
        .mipLodBias = 0.0f,
        .anisotropyEnable = true,
        .maxAnisotropy = properties.limits.maxSamplerAnisotropy,
        .compareEnable = false,
        .minLod = 0,
        .maxLod = VK_LOD_CLAMP_NONE,
        .borderColor = vk::BorderColor::eFloatOpaqueBlack,
        .unnormalizedCoordinates = false,
    };
    m_DefaultSampler = m_SamplerManager.Fetch(m_SamplerManager.Create(device, &m_DefaultSamplerCreateInfo));
    // One pool, one set: sized to hold every pool's worth of descriptors.
    eastl::array poolSizes = {
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eStorageBuffer,
            .descriptorCount = buffersCount,
        },
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eCombinedImageSampler,
            .descriptorCount = texturesCount,
        },
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eStorageImage,
            .descriptorCount = storageTexturesCount,
        },
    };
    const vk::DescriptorPoolCreateInfo poolCreateInfo = {
        .flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
        .maxSets = 1,
        .poolSizeCount = Cast<u32>(poolSizes.size()),
        .pPoolSizes = poolSizes.data(),
    };
    AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));
    // Partially bound + update-after-bind on every binding: slots may be empty
    // and may be (re)written while the set is bound.
    vk::DescriptorBindingFlags bindingFlags =
        vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
    eastl::array layoutBindingFlags = {
        bindingFlags,
        bindingFlags,
        bindingFlags,
    };
    vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
        .bindingCount = Cast<u32>(layoutBindingFlags.size()),
        .pBindingFlags = layoutBindingFlags.data(),
    };
    eastl::array descriptorLayoutBindings = {
        vk::DescriptorSetLayoutBinding{
            .binding = BUFFER_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eStorageBuffer,
            .descriptorCount = Cast<u32>(buffersCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
        vk::DescriptorSetLayoutBinding{
            .binding = TEXTURE_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eCombinedImageSampler,
            .descriptorCount = Cast<u32>(texturesCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
        vk::DescriptorSetLayoutBinding{
            .binding = STORAGE_TEXTURE_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eStorageImage,
            .descriptorCount = Cast<u32>(storageTexturesCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
    };
    // Binding flags must pair 1:1 with the bindings above.
    static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
    const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
        .pNext = &bindingFlagsCreateInfo,
        .flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
        .bindingCount = Cast<u32>(descriptorLayoutBindings.size()),
        .pBindings = descriptorLayoutBindings.data(),
    };
    AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));
    // One descriptor is enough. Updating it at any time is safe. (Update until submit, data held when pending)
    // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
    // https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
    const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
        .descriptorPool = m_DescriptorPool,
        .descriptorSetCount = 1,
        .pSetLayouts = &m_SetLayout,
    };
    AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));
    m_Device->SetName(m_SetLayout, "Bindless Layout");
    m_Device->SetName(m_DescriptorPool, "Bindless Pool");
    m_Device->SetName(m_DescriptorSet, "Bindless Set");
}
// Tears down every pool, the sampler cache, and the descriptor pool/layout.
// Warns (debug builds) about resources the application never released.
GpuResourceManager::~GpuResourceManager()
{
    // BUG FIX: the move operations Take() m_Device, leaving moved-from
    // instances with a null device; destroying one must be a no-op rather
    // than dereferencing null below.
    if (!m_Device)
        return;
#if !defined(ASTER_NDEBUG)
    WARN_IF(m_CommitedBufferCount > 0 || m_CommitedTextureCount > 0 || m_CommitedStorageTextureCount > 0,
            "Resources alive: SSBO = {}, Textures = {}, RWTexture = {}", m_CommitedBufferCount, m_CommitedTextureCount,
            m_CommitedStorageTextureCount);
#endif
    m_BufferManager.Destroy(m_Device);
    m_TextureManager.Destroy(m_Device);
    m_StorageTextureManager.Destroy(m_Device);
    m_SamplerManager.Destroy(m_Device);
    m_Device->m_Device.destroy(m_DescriptorPool, nullptr);
    m_Device->m_Device.destroy(m_SetLayout, nullptr);
}
// Move-construct: steals the queued writes, all pools, and the descriptor
// objects. Take() nulls other.m_Device, which is what marks `other` as
// moved-from.
// NOTE(review): other's pool/layout/set handles are copied but not cleared —
// presumably the null m_Device is the sole moved-from marker; confirm the
// destructor handles moved-from instances.
GpuResourceManager::GpuResourceManager(GpuResourceManager &&other) noexcept
    : m_WriteInfos(std::move(other.m_WriteInfos))
    , m_Writes(std::move(other.m_Writes))
    , m_WriteOwner(std::move(other.m_WriteOwner))
    , m_BufferManager(std::move(other.m_BufferManager))
    , m_TextureManager(std::move(other.m_TextureManager))
    , m_StorageTextureManager(std::move(other.m_StorageTextureManager))
    , m_SamplerManager(std::move(other.m_SamplerManager))
    , m_Device(Take(other.m_Device))
    , m_DescriptorPool(other.m_DescriptorPool)
    , m_SetLayout(other.m_SetLayout)
    , m_DescriptorSet(other.m_DescriptorSet)
#if !defined(ASTER_NDEBUG)
    , m_CommitedBufferCount(other.m_CommitedBufferCount)
    , m_CommitedTextureCount(other.m_CommitedTextureCount)
    , m_CommitedStorageTextureCount(other.m_CommitedStorageTextureCount)
#endif
{
    assert(!other.m_Device);
}
// Move-assign: adopts other's state; self-assignment is a no-op.
// NOTE(review): the current contents of *this (descriptor pool/layout/set,
// committed resources) are overwritten without being destroyed first —
// assigning onto a live manager would leak them; confirm callers only assign
// onto empty or moved-from instances.
GpuResourceManager &
GpuResourceManager::operator=(GpuResourceManager &&other) noexcept
{
    if (this == &other)
        return *this;
    m_WriteInfos = std::move(other.m_WriteInfos);
    m_Writes = std::move(other.m_Writes);
    m_WriteOwner = std::move(other.m_WriteOwner);
    m_BufferManager = std::move(other.m_BufferManager);
    m_TextureManager = std::move(other.m_TextureManager);
    m_StorageTextureManager = std::move(other.m_StorageTextureManager);
    m_SamplerManager = std::move(other.m_SamplerManager);
    m_Device = Take(other.m_Device); // Ensure taken.
    m_DescriptorPool = other.m_DescriptorPool;
    m_SetLayout = other.m_SetLayout;
    m_DescriptorSet = other.m_DescriptorSet;
#if !defined(ASTER_NDEBUG)
    m_CommitedBufferCount = other.m_CommitedBufferCount;
    m_CommitedTextureCount = other.m_CommitedTextureCount;
    m_CommitedStorageTextureCount = other.m_CommitedStorageTextureCount;
#endif
    assert(!other.m_Device);
    return *this;
}
// Creates (or deduplicates) a sampler via the sampler cache.
SamplerHandle
GpuResourceManager::CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo)
{
    SamplerManager &samplerCache = m_SamplerManager;
    return samplerCache.Create(m_Device, samplerCreateInfo);
}

View File

@ -0,0 +1,175 @@
// =============================================
// Aster: gpu_resource_manager.h
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include <EASTL/deque.h>
#include <EASTL/vector_map.h>
struct Device;
struct Texture;
struct StorageTexture;
struct StorageBuffer;
// Base for all bindless resource handles: a 32-bit index into a manager's
// pool (and simultaneously the dstArrayElement in the bindless descriptor
// set). Default-constructed handles are invalid.
struct GpuResourceHandle
{
    constexpr static u32 INVALID_HANDLE = MaxValue<u32>;
    u32 m_Index = INVALID_HANDLE; // Default = invalid
    // True when the handle does not refer to a committed resource.
    [[nodiscard]] bool
    IsInvalid() const
    {
        return m_Index == INVALID_HANDLE;
    }
};
// Strongly-typed handle for storage buffers committed to the bindless set.
struct BufferHandle : GpuResourceHandle
{
};
// Strongly-typed handle for sampled textures committed to the bindless set.
struct TextureHandle : GpuResourceHandle
{
};
// Strongly-typed handle for storage (read/write) textures committed to the
// bindless set.
struct StorageTextureHandle : GpuResourceHandle
{
};
// Strongly-typed handle into the deduplicating sampler cache.
struct SamplerHandle : GpuResourceHandle
{
};
// Slot pool of Textures addressed by TextureHandle. Freed slots are recycled
// through an intrusive free-list stored in the first 4 bytes of dead entries.
struct TextureManager
{
    eastl::vector<Texture> m_Textures; // slot storage; index == handle
    u32 m_MaxCapacity;                 // hard cap on slot count
    u32 m_FreeHead;                    // head of the intrusive free-list (INVALID_HANDLE when empty)
    void Init(u32 maxCapacity);
    TextureHandle Commit(Texture *texture); // copy in + take ownership
    Texture *Fetch(TextureHandle handle);
    void Release(const Device *device, TextureHandle handle); // destroy + recycle slot
    void Destroy(const Device *device);                       // destroy everything at shutdown
};
// Slot pool of StorageBuffers addressed by BufferHandle. Freed slots are
// recycled through an intrusive free-list stored in the first 4 bytes of dead
// entries.
struct BufferManager
{
    eastl::vector<StorageBuffer> m_Buffers; // slot storage; index == handle
    u32 m_MaxCapacity;                      // hard cap on slot count
    u32 m_FreeHead;                         // head of the intrusive free-list (INVALID_HANDLE when empty)
    void Init(u32 maxCapacity);
    BufferHandle Commit(StorageBuffer *buffer); // copy in + take ownership
    StorageBuffer *Fetch(BufferHandle handle);
    void Release(const Device *device, BufferHandle handle); // destroy + recycle slot
    void Destroy(const Device *device);                      // destroy everything at shutdown
};
// Thin typed wrapper over TextureManager for storage (read/write) textures;
// shares the base slot pool and free-list.
struct StorageTextureManager : TextureManager
{
    StorageTextureHandle Commit(StorageTexture *texture);
    StorageTexture *Fetch(StorageTextureHandle handle);
    void Release(const Device *device, StorageTextureHandle handle);
};
// Deduplicating sampler cache: Create() hashes the create-info and reuses an
// existing vk::Sampler when one matches. Samplers are never individually
// destroyed, so handles are stable indices into the parallel arrays.
struct SamplerManager
{
    // There can only be so many samplers.
    eastl::vector<vk::Sampler> m_Samplers;
    eastl::vector<usize> m_SamplerHashes; // create-info hashes, parallel to m_Samplers
    void Init(usize size);
    SamplerHandle Create(const Device *device, const vk::SamplerCreateInfo *createInfo);
    vk::Sampler Fetch(SamplerHandle handle);
    void Destroy(const Device *device);
};
// Owns the bindless descriptor set and the pools behind it. Committing a
// resource transfers ownership into a pool, assigns it a stable index, and
// queues a descriptor write; Update() flushes queued writes in one call.
// Release() either destroys the resource or (pointer overloads) hands
// ownership back to the caller.
struct GpuResourceManager
{
private:
    // One queued descriptor payload; which member is active is implied by the
    // descriptorType of the paired vk::WriteDescriptorSet.
    union WriteInfo {
        vk::DescriptorBufferInfo uBufferInfo;
        vk::DescriptorImageInfo uImageInfo;
        vk::BufferView uBufferView;
        WriteInfo()
        {
        }
        explicit WriteInfo(vk::DescriptorBufferInfo info);
        explicit WriteInfo(vk::DescriptorImageInfo info);
        explicit WriteInfo(vk::BufferView info);
    };
    // Which pool a queued write's owner lives in (so Release can cancel it).
    enum class HandleType
    {
        eBuffer,
        eTexture,
        eStorageTexture,
    };
    using WriteOwner = eastl::pair<HandleType, u32>;
    // Deque: pointers into it (pBufferInfo/pImageInfo) stay valid as it grows.
    eastl::deque<WriteInfo> m_WriteInfos;
    eastl::vector<vk::WriteDescriptorSet> m_Writes; // pending writes, parallel to m_WriteOwner
    eastl::vector<WriteOwner> m_WriteOwner;
    vk::Sampler m_DefaultSampler; // used when a commit passes an invalid SamplerHandle
    BufferManager m_BufferManager;
    TextureManager m_TextureManager;
    StorageTextureManager m_StorageTextureManager;
    SamplerManager m_SamplerManager;
    void EraseWrites(u32 handleIndex, HandleType handleType);
public:
    Device *m_Device;
    // Fixed binding slots inside the single bindless set.
    constexpr static u32 BUFFER_BINDING_INDEX = 0;
    constexpr static u32 TEXTURE_BINDING_INDEX = 1;
    constexpr static u32 STORAGE_TEXTURE_BINDING_INDEX = 2;
    vk::SamplerCreateInfo m_DefaultSamplerCreateInfo;
    vk::DescriptorPool m_DescriptorPool;
    vk::DescriptorSetLayout m_SetLayout;
    vk::DescriptorSet m_DescriptorSet;
    BufferHandle Commit(StorageBuffer *storageBuffer); // Commit to GPU and take Ownership
    void Write(BufferHandle handle, usize offset, usize size, const void *data); // Write to buffer
    void Release(BufferHandle handle); // Release and Destroy
    void Release(StorageBuffer *storageBuffer, BufferHandle handle); // Release and Return
    TextureHandle CommitTexture(Texture *texture, SamplerHandle sampler = {}); // Commit to GPU and take Ownership
    void Release(TextureHandle handle); // Release and Destroy
    void Release(Texture *texture, TextureHandle handle); // Release and Return
    StorageTextureHandle
    CommitStorageTexture(StorageTexture *storageTexture, SamplerHandle sampler = {}); // Commit to GPU and take Ownership
    void Release(StorageTextureHandle handle); // Release and Destroy
    void Release(StorageTexture *texture, StorageTextureHandle handle); // Release and Return
    SamplerHandle CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo);
    void Update(); // Update all the descriptors required.
    // Ctor/Dtor
    GpuResourceManager(Device *device, u16 maxSize);
    ~GpuResourceManager();
    GpuResourceManager(GpuResourceManager &&other) noexcept;
    GpuResourceManager &operator=(GpuResourceManager &&other) noexcept;
#if !defined(ASTER_NDEBUG)
    // Debug-only live-resource counters, reported by the destructor.
    usize m_CommitedBufferCount = 0;
    usize m_CommitedTextureCount = 0;
    usize m_CommitedStorageTextureCount = 0;
#endif
    DISALLOW_COPY_AND_ASSIGN(GpuResourceManager);
};

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: ibl_helpers.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "ibl_helpers.h"
@ -9,91 +9,82 @@
#include "aster/core/image.h"
#include "asset_loader.h"
#include "gpu_resource_manager.h"
#include "helpers.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
#include "pipeline_utils.h"
#include <EASTL/fixed_vector.h>
#include <EASTL/tuple.h>
constexpr auto EQUIRECT_TO_CUBE_SHADER_FILE = "eqrect_to_cube";
constexpr auto ENVIRONMENT_SHADER_FILE = "environment";
constexpr auto DIFFUSE_IRRADIANCE_ENTRY = "diffuseIrradiance";
constexpr auto PREFILTER_ENTRY = "prefilter";
constexpr auto BRDF_LUT_ENTRY = "brdfLut";
constexpr cstr EQUIRECT_TO_CUBE_SHADER_FILE = "shader/eqrect_to_cube.cs.hlsl.spv";
constexpr cstr DIFFUSE_IRRADIANCE_SHADER_FILE = "shader/diffuse_irradiance.cs.hlsl.spv";
constexpr cstr PREFILTER_SHADER_FILE = "shader/prefilter.cs.hlsl.spv";
constexpr cstr BRDF_LUT_SHADER_FILE = "shader/brdf_lut.cs.hlsl.spv";
void
Environment::Destroy(GpuResourceManager *resourceManager)
{
resourceManager->Release(Take(m_Skybox));
resourceManager->Release(Take(m_Diffuse));
resourceManager->Release(Take(m_Prefilter));
resourceManager->Release(Take(m_BrdfLut));
}
Environment
CreateCubeFromHdrEnv(AssetLoader &assetLoader, u32 const cubeSide, systems::ResId<TextureView> hdrEnv)
CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32 cubeSide, TextureHandle hdrEnv,
const cstr name)
{
systems::RenderingDevice &device = *assetLoader.m_Device;
auto *commitManager = device.m_CommitManager.get();
GpuResourceManager *resMan = assetLoader->m_ResourceManager;
const Device *pDevice = resMan->m_Device;
auto skybox = device.CreateTextureCubeWithView<StorageTextureCubeView>({
.m_Format = vk::Format::eR16G16B16A16Sfloat,
.m_Side = cubeSide,
.m_Name = "Skybox",
.m_IsSampled = true,
.m_IsMipMapped = true,
.m_IsStorage = true,
});
vk::SamplerCreateInfo brdfLutSamplerCreateInfo = resMan->m_DefaultSamplerCreateInfo;
brdfLutSamplerCreateInfo.addressModeU = vk::SamplerAddressMode::eClampToEdge;
brdfLutSamplerCreateInfo.addressModeV = vk::SamplerAddressMode::eClampToEdge;
brdfLutSamplerCreateInfo.addressModeW = vk::SamplerAddressMode::eClampToEdge;
auto skyboxHandle = commitManager->CommitTexture(skybox);
auto skyboxStorageHandle = commitManager->CommitStorageImage(skybox);
StorageTextureCube skybox;
StorageTextureCube diffuseIrradiance;
StorageTextureCube prefilterCube;
StorageTexture brdfLut;
SamplerHandle brdfLutSampler;
auto diffuseIrradiance = device.CreateTextureCubeWithView<StorageTextureCubeView>({
.m_Format = vk::Format::eR16G16B16A16Sfloat,
.m_Side = 64,
.m_Name = "Diffuse Irradiance",
.m_IsSampled = true,
.m_IsMipMapped = false,
.m_IsStorage = true,
});
auto diffuseIrradianceHandle = commitManager->CommitTexture(diffuseIrradiance);
auto diffuseIrradianceStorageHandle = commitManager->CommitStorageImage(diffuseIrradiance);
skybox.Init(pDevice, cubeSide, vk::Format::eR16G16B16A16Sfloat, true, true, "Skybox");
TextureHandle skyboxHandle = resMan->CommitTexture(&skybox);
StorageTextureHandle skyboxStorageHandle = resMan->CommitStorageTexture(&skybox);
auto prefilterCube = device.CreateTextureCubeWithView<StorageTextureCubeView>({
.m_Format = vk::Format::eR16G16B16A16Sfloat,
.m_Side = cubeSide,
.m_Name = "Prefilter",
.m_IsSampled = true,
.m_IsMipMapped = true,
.m_IsStorage = true,
});
auto prefilterHandle = commitManager->CommitTexture(prefilterCube); // This stores the original view for us.
diffuseIrradiance.Init(pDevice, 64, vk::Format::eR16G16B16A16Sfloat, true, false, "Diffuse Irradiance");
TextureHandle diffuseIrradianceHandle = resMan->CommitTexture(&diffuseIrradiance);
StorageTextureHandle diffuseIrradianceStorageHandle = resMan->CommitStorageTexture(&diffuseIrradiance);
prefilterCube.Init(pDevice, cubeSide, vk::Format::eR16G16B16A16Sfloat, true, true, "Prefilter");
TextureHandle prefilterHandle = resMan->CommitTexture(&prefilterCube); // This stores the original view for us.
constexpr u32 prefilterMipCountMax = 6;
eastl::fixed_vector<systems::ResId<StorageImageView>, prefilterMipCountMax> prefilterStorageHandles;
eastl::array<StorageTextureHandle, prefilterMipCountMax> prefilterStorageHandles;
// All non-owning copies.
for (u8 mipLevel = 0; mipLevel < prefilterMipCountMax; ++mipLevel)
for (u32 mipLevel = 0; auto &tex : prefilterStorageHandles)
{
auto view = device.CreateView<StorageTextureCubeView>({
.m_Image = systems::CastImage<StorageTextureCube>(prefilterCube->m_Image),
.m_ViewType = vk::ImageViewType::eCube,
.m_AspectMask = vk::ImageAspectFlagBits::eColor,
.m_MipLevelCount = 1,
.m_LayerCount = 6,
.m_BaseMipLevel = mipLevel,
.m_BaseLayer = 0,
});
prefilterStorageHandles.push_back(commitManager->CommitStorageImage(view));
vk::ImageViewCreateInfo imageViewCreateInfo = {
.image = prefilterCube.m_Image,
.viewType = vk::ImageViewType::eCube,
.format = vk::Format::eR16G16B16A16Sfloat,
.components = vk::ComponentMapping{},
.subresourceRange =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = mipLevel++,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 6,
},
};
AbortIfFailed(pDevice->m_Device.createImageView(&imageViewCreateInfo, nullptr, &prefilterCube.m_View));
tex = resMan->CommitStorageTexture(&prefilterCube);
}
auto brdfLut = device.CreateTexture2DWithView<StorageTextureView>({
.m_Format = vk::Format::eR16G16Sfloat,
.m_Extent = {512, 512},
.m_Name = "BRDF LUT",
.m_IsSampled = true,
.m_IsMipMapped = false,
.m_IsStorage = true,
});
auto brdfLutSampler = device.CreateSampler({
.m_AddressModeU = vk::SamplerAddressMode::eClampToEdge,
.m_AddressModeV = vk::SamplerAddressMode::eClampToEdge,
.m_AddressModeW = vk::SamplerAddressMode::eClampToEdge,
});
auto brdfLutHandle = commitManager->CommitTexture(brdfLut, brdfLutSampler);
auto brdfLutStorageHandle = commitManager->CommitStorageImage(brdfLut);
brdfLut.Init(pDevice, {512, 512}, vk::Format::eR16G16Sfloat, true, "BRDF LUT");
brdfLutSampler = resMan->CreateSampler(&brdfLutSamplerCreateInfo);
TextureHandle brdfLutHandle = resMan->CommitTexture(&brdfLut, brdfLutSampler);
StorageTextureHandle brdfLutStorageHandle = resMan->CommitStorageTexture(&brdfLut);
#pragma region Dependencies and Copies
vk::ImageSubresourceRange cubeSubresRange = {
@ -123,14 +114,14 @@ CreateCubeFromHdrEnv(AssetLoader &assetLoader, u32 const cubeSide, systems::ResI
.subresourceRange = cubeSubresRange,
};
eastl::fixed_vector<vk::ImageMemoryBarrier2, 4> readyToWriteBarriers(4, readyToWriteBarrierTemplate);
readyToWriteBarriers[0].image = skybox->GetImage();
readyToWriteBarriers[1].image = diffuseIrradiance->GetImage();
readyToWriteBarriers[2].image = prefilterCube->GetImage();
readyToWriteBarriers[3].image = brdfLut->GetImage();
readyToWriteBarriers[0].image = skybox.m_Image;
readyToWriteBarriers[1].image = diffuseIrradiance.m_Image;
readyToWriteBarriers[2].image = prefilterCube.m_Image;
readyToWriteBarriers[3].image = brdfLut.m_Image;
readyToWriteBarriers[3].subresourceRange = lutSubresRange;
vk::DependencyInfo readyToWriteDependency = {
.imageMemoryBarrierCount = static_cast<u32>(readyToWriteBarriers.size()),
.imageMemoryBarrierCount = Cast<u32>(readyToWriteBarriers.size()),
.pImageMemoryBarriers = readyToWriteBarriers.data(),
};
@ -145,16 +136,16 @@ CreateCubeFromHdrEnv(AssetLoader &assetLoader, u32 const cubeSide, systems::ResI
.subresourceRange = cubeSubresRange,
};
auto skyboxToSampleBarrier = readyToSampleBarrierTemplate;
skyboxToSampleBarrier.image = skybox->GetImage();
skyboxToSampleBarrier.image = skybox.m_Image;
auto diffIrrToSampleBarrier = readyToSampleBarrierTemplate;
diffIrrToSampleBarrier.image = diffuseIrradiance->GetImage();
diffIrrToSampleBarrier.image = diffuseIrradiance.m_Image;
auto prefilterToSampleBarrier = readyToSampleBarrierTemplate;
prefilterToSampleBarrier.image = prefilterCube->GetImage();
prefilterToSampleBarrier.image = prefilterCube.m_Image;
auto brdfToSampleBarrier = readyToSampleBarrierTemplate;
prefilterToSampleBarrier.image = brdfLut->GetImage();
prefilterToSampleBarrier.image = brdfLut.m_Image;
prefilterToSampleBarrier.subresourceRange = lutSubresRange;
vk::DependencyInfo skyboxToSampleDependency = {
@ -178,144 +169,103 @@ CreateCubeFromHdrEnv(AssetLoader &assetLoader, u32 const cubeSide, systems::ResI
struct SkyboxPushConstants
{
systems::ResId<TextureView> m_HdrEnvHandle;
systems::ResId<StorageImageView> m_OutputTexture;
TextureHandle m_HdrEnvHandle;
StorageTextureHandle m_OutputTexture;
u32 m_CubeSide;
};
struct DiffuseIrradiancePushConstants
{
systems::ResId<TextureView> m_SkyboxHandle;
systems::ResId<StorageImageView> m_OutputTexture;
TextureHandle m_SkyboxHandle;
StorageTextureHandle m_OutputTexture;
u32 m_CubeSide;
};
struct PrefilterPushConstants
{
systems::ResId<TextureView> m_SkyboxHandle;
systems::ResId<StorageImageView> m_OutputTexture;
TextureHandle m_SkyboxHandle;
StorageTextureHandle m_OutputTexture;
u32 m_CubeSide;
f32 m_Roughness;
u32 m_EnvSide;
};
struct BrdfLutPushConstants
{
systems::ResId<StorageImageView> m_OutputTexture;
StorageTextureHandle m_OutputTexture;
};
#pragma region Pipeline Creation etc
// vk::PushConstantRange pcr = {
// .stageFlags = vk::ShaderStageFlagBits::eCompute,
// .offset = 0,
// .size = static_cast<u32>(
// eastl::max(eastl::max(sizeof(SkyboxPushConstants), sizeof(BrdfLutPushConstants)),
// eastl::max(sizeof(DiffuseIrradiancePushConstants), sizeof(PrefilterPushConstants)))),
// };
vk::PushConstantRange pcr = {
.stageFlags = vk::ShaderStageFlagBits::eCompute,
.offset = 0,
.size = Cast<u32>(eastl::max(eastl::max(sizeof(SkyboxPushConstants), sizeof(BrdfLutPushConstants)),
eastl::max(sizeof(DiffuseIrradiancePushConstants), sizeof(PrefilterPushConstants)))),
};
// vk::PipelineLayout pipelineLayout;
// const vk::PipelineLayoutCreateInfo layoutCreateInfo = {
// .setLayoutCount = 1,
// .pSetLayouts = &commitManager->GetDescriptorSetLayout(),
// .pushConstantRangeCount = 1,
// .pPushConstantRanges = &pcr,
// };
// AbortIfFailed(device.m_Device->createPipelineLayout(&layoutCreateInfo, nullptr, &pipelineLayout));
vk::PipelineLayout pipelineLayout;
const vk::PipelineLayoutCreateInfo layoutCreateInfo = {
.setLayoutCount = 1,
.pSetLayouts = &resMan->m_SetLayout,
.pushConstantRangeCount = 1,
.pPushConstantRanges = &pcr,
};
AbortIfFailed(pDevice->m_Device.createPipelineLayout(&layoutCreateInfo, nullptr, &pipelineLayout));
// const auto eqRectToCubeShader = CreateShader(pDevice, EQUIRECT_TO_CUBE_SHADER_FILE);
// const auto diffuseRadianceShader = CreateShader(pDevice, DIFFUSE_IRRADIANCE_SHADER_FILE);
// const auto prefilterShader = CreateShader(pDevice, PREFILTER_SHADER_FILE);
// const auto brdfLutShader = CreateShader(pDevice, BRDF_LUT_SHADER_FILE);
// eastl::array computePipelineCreateInfo = {
// vk::ComputePipelineCreateInfo{
// .stage =
// {
// .stage = vk::ShaderStageFlagBits::eCompute,
// .module = eqRectToCubeShader,
// .pName = "main",
// },
// .layout = pipelineLayout,
// },
// vk::ComputePipelineCreateInfo{
// .stage =
// {
// .stage = vk::ShaderStageFlagBits::eCompute,
// .module = diffuseRadianceShader,
// .pName = "main",
// },
// .layout = pipelineLayout,
// },
// vk::ComputePipelineCreateInfo{
// .stage =
// {
// .stage = vk::ShaderStageFlagBits::eCompute,
// .module = prefilterShader,
// .pName = "main",
// },
// .layout = pipelineLayout,
// },
// vk::ComputePipelineCreateInfo{
// .stage =
// {
// .stage = vk::ShaderStageFlagBits::eCompute,
// .module = brdfLutShader,
// .pName = "main",
// },
// .layout = pipelineLayout,
// },
// };
// eastl::array<vk::Pipeline, computePipelineCreateInfo.size()> pipelines;
// AbortIfFailed(pDevice->m_Device.createComputePipelines(
// pDevice->m_PipelineCache, static_cast<u32>(computePipelineCreateInfo.size()),
// computePipelineCreateInfo.data(), nullptr, pipelines.data()));
// vk::Pipeline eqRectToCubePipeline = pipelines[0];
// vk::Pipeline diffuseIrradiancePipeline = pipelines[1];
// vk::Pipeline prefilterPipeline = pipelines[2];
// vk::Pipeline brdfLutPipeline = pipelines[3];
Pipeline eqRectToCubePipeline;
if (auto result =
device.CreateComputePipeline(eqRectToCubePipeline, {
.m_Shader =
const auto eqRectToCubeShader = CreateShader(pDevice, EQUIRECT_TO_CUBE_SHADER_FILE);
const auto diffuseRadianceShader = CreateShader(pDevice, DIFFUSE_IRRADIANCE_SHADER_FILE);
const auto prefilterShader = CreateShader(pDevice, PREFILTER_SHADER_FILE);
const auto brdfLutShader = CreateShader(pDevice, BRDF_LUT_SHADER_FILE);
eastl::array computePipelineCreateInfo = {
vk::ComputePipelineCreateInfo{
.stage =
{
.m_ShaderFile = EQUIRECT_TO_CUBE_SHADER_FILE,
.m_EntryPoints = {"main"},
.stage = vk::ShaderStageFlagBits::eCompute,
.module = eqRectToCubeShader,
.pName = "main",
},
.m_Name = "EqRect -> Cubemap",
}))
.layout = pipelineLayout,
},
vk::ComputePipelineCreateInfo{
.stage =
{
ERROR("EqRect -> Cubemap Pipeline Creation failed. Cause: {}", result.What()) THEN_ABORT(result.Value());
}
Pipeline diffuseIrradiancePipeline;
if (auto result = device.CreateComputePipeline(
diffuseIrradiancePipeline,
{{.m_ShaderFile = ENVIRONMENT_SHADER_FILE, .m_EntryPoints = {DIFFUSE_IRRADIANCE_ENTRY}},
"DiffuseIrradiance"}))
.stage = vk::ShaderStageFlagBits::eCompute,
.module = diffuseRadianceShader,
.pName = "main",
},
.layout = pipelineLayout,
},
vk::ComputePipelineCreateInfo{
.stage =
{
ERROR("Diffuse Irradiance compute pipeline creation failed. Cause: {}", result.What())
THEN_ABORT(result.Value());
}
Pipeline prefilterPipeline;
if (auto result = device.CreateComputePipeline(
prefilterPipeline,
{{.m_ShaderFile = ENVIRONMENT_SHADER_FILE, .m_EntryPoints = {PREFILTER_ENTRY}}, "Prefilter"}))
.stage = vk::ShaderStageFlagBits::eCompute,
.module = prefilterShader,
.pName = "main",
},
.layout = pipelineLayout,
},
vk::ComputePipelineCreateInfo{
.stage =
{
ERROR("Prefilter compute pipeline creation failed. Cause: {}", result.What())
THEN_ABORT(result.Value());
}
.stage = vk::ShaderStageFlagBits::eCompute,
.module = brdfLutShader,
.pName = "main",
},
.layout = pipelineLayout,
},
};
Pipeline brdfLutPipeline;
eastl::array<vk::Pipeline, computePipelineCreateInfo.size()> pipelines;
AbortIfFailed(pDevice->m_Device.createComputePipelines(pDevice->m_PipelineCache, Cast<u32>(computePipelineCreateInfo.size()),
computePipelineCreateInfo.data(), nullptr,
pipelines.data()));
if (auto result = device.CreateComputePipeline(
brdfLutPipeline, {{.m_ShaderFile = ENVIRONMENT_SHADER_FILE, .m_EntryPoints = {BRDF_LUT_ENTRY}}, "BRDF"}))
vk::Pipeline eqRectToCubePipeline = pipelines[0];
vk::Pipeline diffuseIrradiancePipeline = pipelines[1];
vk::Pipeline prefilterPipeline = pipelines[2];
vk::Pipeline brdfLutPipeline = pipelines[3];
for (auto &createInfos : computePipelineCreateInfo)
{
ERROR("BRDF LUT compute pipeline creation failed. Cause: {}", result.What())
THEN_ABORT(result.Value());
pDevice->m_Device.destroy(createInfos.stage.module, nullptr);
}
#pragma endregion
@ -328,68 +278,115 @@ CreateCubeFromHdrEnv(AssetLoader &assetLoader, u32 const cubeSide, systems::ResI
DiffuseIrradiancePushConstants diffuseIrradiancePushConstants = {
.m_SkyboxHandle = skyboxHandle,
.m_OutputTexture = diffuseIrradianceStorageHandle,
.m_CubeSide = diffuseIrradiance->m_Extent.width,
.m_CubeSide = diffuseIrradiance.m_Extent.width,
};
PrefilterPushConstants prefilterPushConstants = {
.m_SkyboxHandle = skyboxHandle,
.m_OutputTexture = systems::NullId{},
.m_EnvSide = cubeSide,
};
BrdfLutPushConstants brdfLutPushConstants = {
.m_OutputTexture = brdfLutStorageHandle,
};
commitManager->Update();
resMan->Update();
auto context = assetLoader.m_Device->CreateComputeContext();
auto cmd = assetLoader->m_CommandBuffer;
constexpr vk::CommandBufferBeginInfo beginInfo = {.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit};
AbortIfFailed(cmd.begin(&beginInfo));
context.Begin();
#if !defined(ASTER_NDEBUG)
StackString<128> labelName = "Eqrect -> Cubemap: ";
labelName += name ? name : "<unknown env>";
vk::DebugUtilsLabelEXT label = {
.pLabelName = labelName.c_str(),
.color = std::array{1.0f, 1.0f, 1.0f, 1.0f},
};
cmd.beginDebugUtilsLabelEXT(&label);
#endif
context.BeginDebugRegion("Eqrect -> Cubemap");
cmd.pipelineBarrier2(&readyToWriteDependency);
context.Dependency(readyToWriteDependency);
cmd.bindDescriptorSets(vk::PipelineBindPoint::eCompute, pipelineLayout, 0, 1, &resMan->m_DescriptorSet, 0, nullptr);
cmd.bindPipeline(vk::PipelineBindPoint::eCompute, eqRectToCubePipeline);
cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof skyboxPushConstant,
&skyboxPushConstant);
assert(skybox.m_Extent.width % 16 == 0 && skybox.m_Extent.height % 16 == 0);
cmd.dispatch(skybox.m_Extent.width / 16, skybox.m_Extent.height / 16, 6);
assert(skybox->m_Extent.width % 16 == 0 && skybox->m_Extent.height % 16 == 0);
context.Dispatch(eqRectToCubePipeline, skybox->m_Extent.width / 16, skybox->m_Extent.height / 16, 6,
skyboxPushConstant);
GenerateMipMaps(context, skybox, vk::ImageLayout::eGeneral, vk::ImageLayout::eGeneral,
GenerateMipMaps(cmd, &skybox, vk::ImageLayout::eGeneral, vk::ImageLayout::eGeneral,
vk::PipelineStageFlagBits2::eComputeShader, vk::PipelineStageFlagBits2::eComputeShader);
assert(diffuseIrradiance->m_Extent.width % 16 == 0 && diffuseIrradiance->m_Extent.height % 16 == 0);
context.Dispatch(diffuseIrradiancePipeline, diffuseIrradiance->m_Extent.width / 16,
diffuseIrradiance->m_Extent.width / 16, 6, diffuseIrradiancePushConstants);
cmd.bindPipeline(vk::PipelineBindPoint::eCompute, diffuseIrradiancePipeline);
cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof skyboxPushConstant,
&diffuseIrradiancePushConstants);
assert(diffuseIrradiance.m_Extent.width % 16 == 0 && diffuseIrradiance.m_Extent.height % 16 == 0);
cmd.dispatch(diffuseIrradiance.m_Extent.width / 16, diffuseIrradiance.m_Extent.width / 16, 6);
context.Dependency(diffIrrToSampleDependency);
cmd.pipelineBarrier2(&diffIrrToSampleDependency);
u32 mipSize = prefilterCube->m_Extent.width;
cmd.bindPipeline(vk::PipelineBindPoint::eCompute, prefilterPipeline);
u32 mipSize = prefilterCube.m_Extent.width;
assert(mipSize % 16 == 0);
for (u32 mipCount = 0; auto &tex : prefilterStorageHandles)
{
prefilterPushConstants.m_OutputTexture = tex;
prefilterPushConstants.m_CubeSide = mipSize;
prefilterPushConstants.m_Roughness = static_cast<f32>(mipCount) / static_cast<f32>(prefilterMipCountMax - 1);
prefilterPushConstants.m_Roughness = Cast<f32>(mipCount) / Cast<f32>(prefilterMipCountMax);
cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof prefilterPushConstants,
&prefilterPushConstants);
u32 groupCount = eastl::max(mipSize / 16u, 1u);
context.Dispatch(prefilterPipeline, groupCount, groupCount, 6, prefilterPushConstants);
cmd.dispatch(groupCount, groupCount, 6);
++mipCount;
mipSize = mipSize >> 1;
}
context.Dependency(skyboxToSampleDependency);
context.Dependency(prefilterToSampleDependency);
cmd.pipelineBarrier2(&skyboxToSampleDependency);
cmd.pipelineBarrier2(&prefilterToSampleDependency);
assert(brdfLut->m_Extent.width % 16 == 0 && brdfLut->m_Extent.height % 16 == 0);
context.Dispatch(brdfLutPipeline, brdfLut->m_Extent.width / 16, brdfLut->m_Extent.height / 16, 1,
brdfLutPushConstants);
cmd.bindPipeline(vk::PipelineBindPoint::eCompute, brdfLutPipeline);
cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof brdfLutPushConstants,
&brdfLutPushConstants);
assert(brdfLut.m_Extent.width % 16 == 0 && brdfLut.m_Extent.height % 16 == 0);
cmd.dispatch(brdfLut.m_Extent.width / 16, brdfLut.m_Extent.height / 16, 1);
context.EndDebugRegion();
#if !defined(ASTER_NDEBUG)
cmd.endDebugUtilsLabelEXT();
#endif
context.End();
AbortIfFailed(cmd.end());
auto receipt = device.Submit(context);
device.WaitOn(receipt);
vk::SubmitInfo submitInfo = {
.waitSemaphoreCount = 0,
.pWaitDstStageMask = nullptr,
.commandBufferCount = 1,
.pCommandBuffers = &cmd,
};
vk::Fence fence;
vk::FenceCreateInfo fenceCreateInfo = {};
AbortIfFailed(pDevice->m_Device.createFence(&fenceCreateInfo, nullptr, &fence));
AbortIfFailed(computeQueue.submit(1, &submitInfo, fence));
AbortIfFailed(pDevice->m_Device.waitForFences(1, &fence, true, MaxValue<u32>));
pDevice->m_Device.destroy(fence, nullptr);
AbortIfFailed(pDevice->m_Device.resetCommandPool(assetLoader->m_CommandPool, {}));
skybox = {};
resMan->Release(skyboxStorageHandle);
resMan->Release(diffuseIrradianceStorageHandle);
resMan->Release(brdfLutStorageHandle);
for (auto &texHandles : prefilterStorageHandles)
{
StorageTextureCube st;
resMan->Release(&st, texHandles);
pDevice->m_Device.destroy(st.m_View, nullptr);
}
for (auto &pipeline : pipelines)
{
pDevice->m_Device.destroy(pipeline, nullptr);
}
pDevice->m_Device.destroy(pipelineLayout, nullptr);
return {
.m_Skybox = skyboxHandle,

View File

@ -1,14 +1,12 @@
// =============================================
// Aster: ibl_helpers.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
#include "aster/aster.h"
#include "aster/core/image.h"
#include "aster/core/image_view.h"
#include "aster/systems/resource.h"
#include "gpu_resource_manager.h"
struct Pipeline;
struct Texture;
@ -17,10 +15,14 @@ struct AssetLoader;
struct Environment
{
systems::ResId<TextureView> m_Skybox;
systems::ResId<TextureView> m_Diffuse;
systems::ResId<TextureView> m_Prefilter;
systems::ResId<TextureView> m_BrdfLut;
TextureHandle m_Skybox;
TextureHandle m_Diffuse;
TextureHandle m_Prefilter;
TextureHandle m_BrdfLut;
void Destroy(GpuResourceManager *resourceManager);
};
Environment CreateCubeFromHdrEnv(AssetLoader &assetLoader, u32 cubeSide, systems::ResId<TextureView> hdrEnv);
Environment
CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, u32 cubeSide, TextureHandle hdrEnv,
cstr name = nullptr);

View File

@ -1,16 +1,35 @@
// =============================================
// Aster: light_manager.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#include "light_manager.h"
#include "aster/core/buffer.h"
#include "aster/systems/commit_manager.h"
#include "aster/systems/rendering_device.h"
#include "aster/systems/resource.h"
#include "glm/ext/matrix_transform.hpp"
struct Light
{
union {
vec3 um_Position;
vec3 um_Direction;
};
f32 m_Range; // < 0.0 for invalid
u32 m_Color_; // LSB is used for flags. (R G B Flags)
f32 m_Intensity;
constexpr static u32 MAX_GEN = 0x40;
constexpr static u32 GEN_MASK = MAX_GEN - 1;
constexpr static u32 TYPE_MASK = 0xC0;
constexpr static u32 TYPE_INVALID = 0x0;
constexpr static u32 TYPE_DIRECTIONAL = 1 << 6;
constexpr static u32 TYPE_POINT = 2 << 6;
constexpr static u32 TYPE_SPOT = 3 << 6; // Currently Unused
constexpr static u32 COLOR_MASK = ~(GEN_MASK | TYPE_MASK);
};
// Static Checks
// Ensure layouts are exact.
@ -34,29 +53,29 @@ static_assert((Light::TYPE_MASK & Light::TYPE_SPOT) == Light::TYPE_SPOT);
static_assert(Light::COLOR_MASK == 0xFFFFFF00);
inline u32
ToColor32(vec4 const &col)
ToColor32(const vec4 &col)
{
u32 const r = static_cast<u32>(eastl::min(col.r, 1.0f) * 255.99f);
u32 const g = static_cast<u32>(eastl::min(col.g, 1.0f) * 255.99f);
u32 const b = static_cast<u32>(eastl::min(col.b, 1.0f) * 255.99f);
u32 const a = static_cast<u32>(eastl::min(col.a, 1.0f) * 255.99f);
const u32 r = Cast<u32>(eastl::min(col.r, 1.0f) * 255.99f);
const u32 g = Cast<u32>(eastl::min(col.g, 1.0f) * 255.99f);
const u32 b = Cast<u32>(eastl::min(col.b, 1.0f) * 255.99f);
const u32 a = Cast<u32>(eastl::min(col.a, 1.0f) * 255.99f);
return r << 24 | g << 16 | b << 8 | a;
}
inline u32
ToColor32(vec3 const &col)
ToColor32(const vec3 &col)
{
u32 const r = static_cast<u32>(eastl::min(col.r, 1.0f) * 255.99f);
u32 const g = static_cast<u32>(eastl::min(col.g, 1.0f) * 255.99f);
u32 const b = static_cast<u32>(eastl::min(col.b, 1.0f) * 255.99f);
const u32 r = Cast<u32>(eastl::min(col.r, 1.0f) * 255.99f);
const u32 g = Cast<u32>(eastl::min(col.g, 1.0f) * 255.99f);
const u32 b = Cast<u32>(eastl::min(col.b, 1.0f) * 255.99f);
constexpr u32 a = 255;
return r << 24 | g << 16 | b << 8 | a;
}
LightManager::LightManager(systems::RenderingDevice &device)
: m_Device{&device}
LightManager::LightManager(GpuResourceManager *resourceManager)
: m_ResourceManager{resourceManager}
, m_DirectionalLightCount{}
, m_PointLightCount{}
, m_MetaInfo{}
@ -64,10 +83,41 @@ LightManager::LightManager(systems::RenderingDevice &device)
{
}
LightHandle
LightManager::AddDirectional(vec3 const &direction, vec3 const &color, f32 intensity)
LightManager::~LightManager()
{
vec3 const normDirection = normalize(direction);
m_ResourceManager->Release(m_MetaInfo.m_LightBuffer);
}
LightManager::LightManager(LightManager &&other) noexcept
: m_ResourceManager(other.m_ResourceManager)
, m_Lights(std::move(other.m_Lights))
, m_DirectionalLightCount(other.m_DirectionalLightCount)
, m_PointLightCount(other.m_PointLightCount)
, m_MetaInfo(other.m_MetaInfo)
, m_GpuBufferCapacity_(other.m_GpuBufferCapacity_)
{
other.m_MetaInfo.m_LightBuffer = {};
}
LightManager &
LightManager::operator=(LightManager &&other) noexcept
{
if (this == &other)
return *this;
m_ResourceManager = other.m_ResourceManager;
m_Lights = std::move(other.m_Lights);
m_DirectionalLightCount = other.m_DirectionalLightCount;
m_PointLightCount = other.m_PointLightCount;
m_MetaInfo = other.m_MetaInfo;
other.m_MetaInfo.m_LightBuffer = {};
m_GpuBufferCapacity_ = other.m_GpuBufferCapacity_;
return *this;
}
LightHandle
LightManager::AddDirectional(const vec3 &direction, const vec3 &color, f32 intensity)
{
const vec3 normDirection = normalize(direction);
if (m_DirectionalLightCount < m_MetaInfo.m_DirectionalLightMaxCount)
{
u16 index = 0;
@ -75,7 +125,7 @@ LightManager::AddDirectional(vec3 const &direction, vec3 const &color, f32 inten
{
if (light.m_Range < 0)
{
u8 const gen = light.m_Color_ & Light::GEN_MASK;
const u8 gen = light.m_Color_ & Light::GEN_MASK;
light.m_Color_ = (ToColor32(color) & Light::COLOR_MASK) | Light::TYPE_DIRECTIONAL | gen;
light.m_Range = 1.0f;
@ -95,8 +145,8 @@ LightManager::AddDirectional(vec3 const &direction, vec3 const &color, f32 inten
if (m_DirectionalLightCount == m_MetaInfo.m_DirectionalLightMaxCount &&
m_MetaInfo.m_DirectionalLightMaxCount == m_MetaInfo.m_PointLightOffset)
{
u16 const oldPointLightOffset = m_MetaInfo.m_PointLightOffset;
u32 const pointLightMaxCount = m_MetaInfo.m_PointLightMaxCount;
const u16 oldPointLightOffset = m_MetaInfo.m_PointLightOffset;
const u32 pointLightMaxCount = m_MetaInfo.m_PointLightMaxCount;
// Might cause a capacity increase, but I want to use that for my gpu buffer resize.
m_Lights.push_back();
m_Lights.push_back();
@ -122,7 +172,7 @@ LightManager::AddDirectional(vec3 const &direction, vec3 const &color, f32 inten
m_Lights[m_DirectionalLightCount].m_Range = 1.0f;
m_Lights[m_DirectionalLightCount].um_Direction = normDirection;
m_Lights[m_DirectionalLightCount].m_Intensity = intensity;
u16 const index = m_DirectionalLightCount;
const u16 index = m_DirectionalLightCount;
++m_DirectionalLightCount;
++m_MetaInfo.m_DirectionalLightMaxCount;
@ -131,7 +181,7 @@ LightManager::AddDirectional(vec3 const &direction, vec3 const &color, f32 inten
}
LightHandle
LightManager::AddPoint(vec3 const &position, vec3 const &color, f32 const radius, f32 intensity)
LightManager::AddPoint(const vec3 &position, const vec3 &color, const f32 radius, f32 intensity)
{
assert(m_PointLightCount <= m_MetaInfo.m_PointLightMaxCount);
assert(radius >= 0.0f);
@ -142,7 +192,7 @@ LightManager::AddPoint(vec3 const &position, vec3 const &color, f32 const radius
{
if (light->m_Range < 0)
{
u8 const gen = light->m_Color_ & Light::GEN_MASK;
const u8 gen = light->m_Color_ & Light::GEN_MASK;
light->m_Color_ = (ToColor32(color) & Light::COLOR_MASK) | Light::TYPE_POINT | gen;
light->m_Range = radius;
@ -151,7 +201,7 @@ LightManager::AddPoint(vec3 const &position, vec3 const &color, f32 const radius
m_GpuBufferCapacity_ |= UPDATE_REQUIRED_BIT;
return {Light::TYPE_POINT, gen, static_cast<u16>(index)};
return {Light::TYPE_POINT, gen, Cast<u16>(index)};
}
++light;
}
@ -160,7 +210,7 @@ LightManager::AddPoint(vec3 const &position, vec3 const &color, f32 const radius
}
m_Lights.push_back();
u16 const index = m_PointLightCount;
const u16 index = m_PointLightCount;
Light *light = &m_Lights[index + m_MetaInfo.m_PointLightOffset];
constexpr u8 gen = 0; // New light
@ -181,29 +231,31 @@ LightManager::AddPoint(vec3 const &position, vec3 const &color, f32 const radius
void
LightManager::Update()
{
u16 const requiredBufferCapacity = eastl::min(static_cast<u16>(m_Lights.capacity()), MAX_LIGHTS);
const u16 requiredBufferCapacity = eastl::min(Cast<u16>(m_Lights.capacity()), MAX_LIGHTS);
if ((m_GpuBufferCapacity_ & CAPACITY_MASK) < requiredBufferCapacity)
{
m_LightBuffer = m_Device->CreateStorageBuffer(requiredBufferCapacity * sizeof m_Lights[0], "Light Buffer");
StorageBuffer newBuffer;
newBuffer.Init(m_ResourceManager->m_Device, requiredBufferCapacity * sizeof m_Lights[0], true, "Light Buffer");
m_GpuBufferCapacity_ = requiredBufferCapacity | UPDATE_REQUIRED_BIT;
m_MetaInfo.m_LightBuffer = m_LightBuffer->GetDeviceAddress();
m_ResourceManager->Release(m_MetaInfo.m_LightBuffer);
m_MetaInfo.m_LightBuffer = m_ResourceManager->Commit(&newBuffer);
}
if (m_GpuBufferCapacity_ & UPDATE_REQUIRED_BIT)
{
m_LightBuffer->Write(0, m_Lights.size() * sizeof m_Lights[0], m_Lights.data());
m_ResourceManager->Write(m_MetaInfo.m_LightBuffer, 0, m_Lights.size() * sizeof m_Lights[0], m_Lights.data());
}
}
void
LightManager::RemoveLight(LightHandle const handle)
LightManager::RemoveLight(const LightHandle handle)
{
u8 const handleGen = handle.m_Generation;
const u8 handleGen = handle.m_Generation;
if (handle.m_Type == Light::TYPE_DIRECTIONAL)
{
Light *lightSlot = &m_Lights[handle.m_Index];
u8 const slotGen = lightSlot->m_Color_ & Light::GEN_MASK;
const u8 slotGen = lightSlot->m_Color_ & Light::GEN_MASK;
if (slotGen > handleGen)
{
WARN("Invalid handle gen: {} being freed. (slot gen: {})", handleGen, slotGen);
@ -218,7 +270,7 @@ LightManager::RemoveLight(LightHandle const handle)
if (handle.m_Type == Light::TYPE_POINT)
{
Light *lightSlot = &m_Lights[handle.m_Index + m_MetaInfo.m_PointLightOffset];
u8 const slotGen = lightSlot->m_Color_ & Light::GEN_MASK;
const u8 slotGen = lightSlot->m_Color_ & Light::GEN_MASK;
if (slotGen > handleGen)
{
WARN("Invalid handle gen: {} being freed. (slot gen: {})", handleGen, slotGen);

View File

@ -1,6 +1,6 @@
// =============================================
// Aster: light_manager.h
// Copyright (c) 2020-2025 Anish Bhobe
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================
#pragma once
@ -8,21 +8,7 @@
#include "aster/aster.h"
// TODO: Separate files so you only import handles.
#include "aster/core/buffer.h"
#include "aster/systems/resource.h"
#include <EASTL/vector.h>
namespace systems
{
class RenderingDevice;
}
namespace systems
{
class ResourceManager;
class CommitManager;
} // namespace systems
#include "gpu_resource_manager.h"
struct DirectionalLight
{
@ -47,51 +33,26 @@ struct LightHandle
u16 m_Index;
};
struct Light
{
union {
vec3 um_Position;
vec3 um_Direction;
};
f32 m_Range; // < 0.0 for invalid
u32 m_Color_; // LSB is used for flags. (R G B Flags)
f32 m_Intensity;
u32 m_Pad0;
u32 m_Pad1;
constexpr static u32 MAX_GEN = 0x40;
constexpr static u32 GEN_MASK = MAX_GEN - 1;
constexpr static u32 TYPE_MASK = 0xC0;
constexpr static u32 TYPE_INVALID = 0x0;
constexpr static u32 TYPE_DIRECTIONAL = 1 << 6;
constexpr static u32 TYPE_POINT = 2 << 6;
constexpr static u32 TYPE_SPOT = 3 << 6; // Currently Unused
constexpr static u32 COLOR_MASK = ~(GEN_MASK | TYPE_MASK);
};
struct Light;
struct LightManager
{
constexpr static u16 MAX_LIGHTS = MaxValue<u16>;
struct LightMetaInfo
{
// The number of directional lights is relatively low (1 - 2) and will almost never change in a scene.
// We can use that with Offset = 0, and point light at further offsets.
// This way we don't need to move point lights often.
uptr m_LightBuffer; // 08 08
u16 m_PointLightMaxCount; // 02 10
u16 m_PointLightOffset; // 02 12
u16 m_DirectionalLightMaxCount; // 02 14
u16 m_UnusedPadding0 = 0; // 02 16
BufferHandle m_LightBuffer; // 04 04
u16 m_PointLightMaxCount; // 02 06
u16 m_PointLightOffset; // 02 08
u16 m_DirectionalLightMaxCount; // 02 10
u16 m_UnusedPadding0 = 0; // 02 12
};
systems::RenderingDevice *m_Device;
GpuResourceManager *m_ResourceManager;
eastl::vector<Light> m_Lights;
Ref<Buffer> m_LightBuffer;
// We don't need a Directional Light free list. We will just brute force iterate.
u16 m_DirectionalLightCount;
@ -105,18 +66,18 @@ struct LightManager
// Using lower bit. Capacity can be directly a multiple of 2
// Thus, range is up to MaxValue<u16>
constexpr static u16 UPDATE_REQUIRED_BIT = 1;
constexpr static u16 CAPACITY_MASK = static_cast<u16>(~UPDATE_REQUIRED_BIT);
constexpr static u16 CAPACITY_MASK = Cast<u16>(~UPDATE_REQUIRED_BIT);
LightHandle AddDirectional(const vec3 &direction, const vec3 &color, f32 intensity);
LightHandle AddPoint(const vec3 &position, const vec3 &color, f32 radius, f32 intensity);
void Update();
void RemoveLight(LightHandle handle);
~LightManager() = default;
explicit LightManager(GpuResourceManager *resourceManager);
~LightManager();
explicit LightManager(systems::RenderingDevice &device);
LightManager(LightManager &&other) noexcept = default;
LightManager &operator=(LightManager &&other) noexcept = default;
LightManager(LightManager &&other) noexcept;
LightManager &operator=(LightManager &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(LightManager);
};

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More