Compare commits

..

2 Commits

Author SHA1 Message Date
Anish Bhobe befa36c7f1 ContextPool support for unordered contexts. 2025-05-18 00:06:06 +02:00
Anish Bhobe 3b4ea52611 ContextPools for Frames. 2025-05-17 15:25:33 +02:00
12 changed files with 889 additions and 518 deletions

View File

@ -10,6 +10,7 @@ find_package(fmt CONFIG REQUIRED)
find_package(VulkanMemoryAllocator CONFIG REQUIRED)
find_package(EASTL CONFIG REQUIRED)
find_library(slang NAMES "slang" CONFIG REQUIRED)
find_package(foonathan_memory CONFIG REQUIRED)
add_library(aster_core STATIC)
@ -28,4 +29,5 @@ target_link_libraries(aster_core PRIVATE fmt::fmt)
target_link_libraries(aster_core PRIVATE EASTL)
target_link_libraries(aster_core PUBLIC Vulkan::Headers GPUOpen::VulkanMemoryAllocator)
target_link_libraries(aster_core PUBLIC ${slang})
target_link_libraries(aster_core PRIVATE foonathan_memory)

View File

@ -13,6 +13,7 @@
#include <glm/glm.hpp>
#include <fmt/format.h>
#include <EASTL/shared_ptr.h>
// Macros that can collide with functions.
#if defined(max)
@ -252,7 +253,7 @@ struct fmt::formatter<eastl::fixed_string<TType, TCount, TOverflow>> : nested_fo
};
template <typename T>
using Ref = std::shared_ptr<T>;
using Ref = eastl::shared_ptr<T>;
template <typename T>
using WeakRef = std::weak_ptr<T>;
using WeakRef = eastl::weak_ptr<T>;

View File

@ -6,4 +6,5 @@ target_sources(aster_core
INTERFACE
"device.h"
"resource.h"
"context.h"
"commit_manager.h")

View File

@ -0,0 +1,356 @@
// =============================================
// Aster: context_pool.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include "context.h"
#include <aster/aster.h>
#include <aster/core/buffer.h>
#include <aster/core/image.h>
#include <aster/core/image_view.h>
#include <aster/core/physical_device.h>
#include <aster/core/pipeline.h>
#include <EASTL/intrusive_list.h>
#include <EASTL/optional.h>
#include <EASTL/vector.h>
#include <foonathan/memory/memory_pool.hpp>
#include <foonathan/memory/namespace_alias.hpp>
namespace systems
{
class Device;
struct Frame;
namespace _internal
{
class GraphicsContextPool;
class TransferContextPool;
class ContextPool;
class OrderlessTransferContextPool;
} // namespace _internal
#define DEPRECATE_RAW_CALLS
/// Base recording context: a thin wrapper over a vk::CommandBuffer that is
/// owned by (and returns its keep-alive resources to) a ContextPool.
class Context
{
  protected:
    // Owning pool; set from the reference in the ctor, so non-null for a live context.
    _internal::ContextPool *m_Pool;
    // Command buffer this context records into.
    vk::CommandBuffer m_Cmd;

    friend Device;
    friend _internal::ContextPool;
    friend _internal::OrderlessTransferContextPool;

    // Only pools (and Device) create contexts.
    explicit Context(_internal::ContextPool &pool, const vk::CommandBuffer cmd)
        : m_Pool{&pool}
        , m_Cmd{cmd}
    {
    }

    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(const Ref<Buffer> &buffer);
    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(const Ref<Image> &image);
    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(const Ref<ImageView> &view);

  public:
    /// Records a raw pipeline barrier (vkCmdPipelineBarrier2).
    DEPRECATE_RAW_CALLS void Dependency(const vk::DependencyInfo &dependencyInfo);
    /// Begins recording (one-time-submit). Aborts on failure.
    void Begin();
    /// Ends recording. Aborts on failure.
    void End();
};
/// Context specialized for transfer work (staging-buffer uploads).
class TransferContext : public Context
{
  protected:
    friend Device;
    friend _internal::TransferContextPool;

    explicit TransferContext(_internal::ContextPool &pool, const vk::CommandBuffer cmd)
        : Context{pool, cmd}
    {
    }

  public:
    // Raw pixel payload for UploadTexture; m_NumBytes must match the image's
    // tightly-packed size (checked in UploadTexture).
    struct ImageData
    {
        void *m_Data;
        usize m_NumBytes;
    };

    /// Records a staged copy of `data` into `image`; pins both until the pool resets.
    void UploadTexture(const Ref<Image> &image, const ImageData &data);

    TransferContext(TransferContext &&other) noexcept;
    TransferContext &operator=(TransferContext &&other) noexcept;
    ~TransferContext() = default;
    DISALLOW_COPY_AND_ASSIGN(TransferContext);
};
/// Context specialized for graphics work: pipeline binding, draws, dynamic rendering.
class GraphicsContext : public Context
{
  protected:
    friend Device;
    friend _internal::GraphicsContextPool;

    // Last pipeline bound via BindPipeline; nullptr until then.
    const Pipeline *m_PipelineInUse;

    explicit GraphicsContext(_internal::ContextPool &pool, const vk::CommandBuffer cmd)
        : Context{pool, cmd}
        , m_PipelineInUse{nullptr}
    {
    }

  public:
    DEPRECATE_RAW_CALLS void SetViewport(const vk::Viewport &viewport);
    void BindVertexBuffer(const Ref<VertexBuffer> &vertexBuffer);
    void BindPipeline(const Pipeline &pipeline);

    /// Pushes `block` as push constants for all shader stages.
    /// NOTE: dereferences m_PipelineInUse — BindPipeline must be called first.
    void
    PushConstantBlock(auto &block)
    {
        // Warn (don't fail) above the 128-byte minimum guaranteed by the Vulkan spec.
        if constexpr (sizeof(block) > 128)
        {
            WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof block);
        }
        m_Cmd.pushConstants(m_PipelineInUse->m_Layout, vk::ShaderStageFlagBits::eAll, 0, sizeof block, &block);
    }

    void Draw(usize vertexCount);
    void DrawIndexed(usize indexCount);
    /// Begins dynamic rendering; also sets the scissor to the render area.
    DEPRECATE_RAW_CALLS void BeginRendering(const vk::RenderingInfo &renderingInfo);
    void EndRendering();
};
namespace _internal
{
/// Owns a vk::CommandPool, recycles its command buffers, and pins resources
/// (KeepAlive) until Reset() is called after the GPU has finished.
class ContextPool
{
  protected:
    // Device that created m_Pool; must outlive this object.
    Device *m_Device;
    vk::CommandPool m_Pool;
    // Every command buffer ever allocated from m_Pool; reused across Reset() cycles.
    eastl::vector<vk::CommandBuffer> m_CommandBuffers;
    // How many of m_CommandBuffers have been handed out since the last Reset().
    u32 m_BuffersAllocated;

  public:
    // Free slot for the pool's owner (not interpreted here).
    u16 m_ExtraData;
    // Who is responsible for resetting/destroying this pool.
    enum class ManagedBy : u8
    {
        eFrame,
        eDevice,
    } m_ManagedBy;

  protected:
    // Resources pinned via KeepAlive(); released by Reset().
    eastl::vector<Ref<Buffer>> m_OwnedBuffers;
    eastl::vector<Ref<Image>> m_OwnedImages;
    eastl::vector<Ref<ImageView>> m_OwnedImageViews;

    vk::CommandBuffer AllocateCommandBuffer();

  public:
    [[nodiscard]] Device &
    GetDevice() const
    {
        assert(m_Device);
        return *m_Device;
    }

    // Invoked by the owner when this pool's GPU work completes
    // (e.g. OrderlessTransferContextPool::ReleasePool).
    eastl::function<void(ContextPool &)> m_ResetCallback;

    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(const Ref<Buffer> &buffer);
    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(const Ref<Image> &image);
    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(const Ref<ImageView> &view);

    Context CreateContext();
    /// Resets the vk pool, recycles buffers, and drops keep-alive references.
    void Reset();

    ContextPool() = default;
    ContextPool(Device &device, u32 queueFamilyIndex, ManagedBy managedBy);
    ContextPool(ContextPool &&other) noexcept;
    ContextPool &operator=(ContextPool &&other) noexcept;

    // Identity is the underlying vk::CommandPool handle.
    bool
    operator==(const ContextPool &other) const
    {
        return m_Pool == other.m_Pool;
    }

    ~ContextPool();
    DISALLOW_COPY_AND_ASSIGN(ContextPool);
};
/// ContextPool that hands out TransferContexts.
class TransferContextPool : public ContextPool
{
  public:
    TransferContext CreateTransferContext();

    TransferContextPool() = default;
    TransferContextPool(Device &device, const u32 queueFamilyIndex, const ManagedBy managedBy)
        : ContextPool{device, queueFamilyIndex, managedBy}
    {
    }
    TransferContextPool(TransferContextPool &&other) noexcept = default;
    TransferContextPool &operator=(TransferContextPool &&other) noexcept = default;
    ~TransferContextPool() = default;
    DISALLOW_COPY_AND_ASSIGN(TransferContextPool);
};
/// ContextPool that hands out GraphicsContexts (and, via its base, TransferContexts).
class GraphicsContextPool : public TransferContextPool
{
  public:
    GraphicsContext CreateGraphicsContext();

    GraphicsContextPool() = default;
    GraphicsContextPool(Device &device, const u32 queueFamilyIndex, const ManagedBy managedBy)
        : TransferContextPool{device, queueFamilyIndex, managedBy}
    {
    }
    GraphicsContextPool(GraphicsContextPool &&other) noexcept = default;
    GraphicsContextPool &operator=(GraphicsContextPool &&other) noexcept = default;
    ~GraphicsContextPool() = default;
    DISALLOW_COPY_AND_ASSIGN(GraphicsContextPool);
};
/// Device-owned cache of TransferContextPools for out-of-frame-order transfer work.
/// Pools move between an in-flight list and a free list; entry nodes come from a
/// fixed-size memory pool and are reclaimed wholesale at destruction.
class OrderlessTransferContextPool
{
    // One pooled TransferContextPool, linkable into the free or in-flight list.
    struct TransferContextEntry : eastl::intrusive_list_node
    {
        TransferContextPool m_Pool;

        // True if this entry wraps `other` (identity via vk::CommandPool handle).
        bool
        Contains(const _internal::ContextPool &other) const
        {
            return m_Pool == other;
        }
    };
    using TransferContextList = eastl::intrusive_list<TransferContextEntry>;

    Device *m_Device;
    // Node allocator for TransferContextEntry; nodes are never individually freed
    // (see ~OrderlessTransferContextPool: allocations "wink" away).
    memory::memory_pool<> m_TransferContextMemoryPool;
    TransferContextList m_FreeTransferContexts; // reset pools, ready for reuse
    TransferContextList m_TransferContexts;     // pools currently in flight
    u32 m_QueueFamilyIndex;

    constexpr static usize ENTRY_SIZE = sizeof(TransferContextEntry);
    // Batching factor: entries per memory block (tuning choice, not a hard limit).
    constexpr static usize ENTRIES_PER_BLOCK = 5;
    constexpr static usize BLOCK_SIZE = ENTRIES_PER_BLOCK * ENTRY_SIZE;

  public:
    OrderlessTransferContextPool()
        : m_Device{nullptr}
        , m_TransferContextMemoryPool{ENTRY_SIZE, BLOCK_SIZE}
        , m_QueueFamilyIndex{0}
    {
    }

    /// Late initialization; must run before CreateTransferContext.
    void
    Init(Device &device, const u32 queueFamilyIndex)
    {
        m_Device = &device;
        m_QueueFamilyIndex = queueFamilyIndex;
    }

    /// Reuses a free pool if available, otherwise creates a new entry whose reset
    /// callback returns it to the free list (see ReleasePool).
    TransferContext
    CreateTransferContext()
    {
        if (!m_FreeTransferContexts.empty())
        {
            TransferContextEntry &entry = m_FreeTransferContexts.back();
            m_FreeTransferContexts.pop_back();
            m_TransferContexts.push_back(entry);
            return entry.m_Pool.CreateTransferContext();
        }
        TransferContextEntry &entry = *static_cast<TransferContextEntry *>(m_TransferContextMemoryPool.allocate_node());
        auto pool = TransferContextPool{*m_Device, m_QueueFamilyIndex, ContextPool::ManagedBy::eDevice};
        // NOTE(review): this lambda captures `this` and is never re-pointed by the move
        // operations below — moving this object while pools are in flight leaves the
        // callbacks aimed at the moved-from instance. Confirm moves only happen when idle.
        pool.m_ResetCallback = [this](ContextPool &resetPool) { this->ReleasePool(resetPool); };
        new (&entry) TransferContextEntry{
            .m_Pool = eastl::move(pool),
        };
        m_TransferContexts.push_back(entry);
        return entry.m_Pool.CreateTransferContext();
    }

    /// Moves a completed pool from the in-flight list back to the free list, resetting it.
    void
    ReleasePool(ContextPool &pool)
    {
        auto const found = eastl::find_if(m_TransferContexts.begin(), m_TransferContexts.end(),
                                          [&pool](const TransferContextEntry &v) { return v.Contains(pool); });
        // NOTE(review): assumes the pool is always present; found == end() would be UB here.
        auto &v = *found;
        TransferContextList::remove(v);
        pool.Reset();
        m_FreeTransferContexts.push_back(v);
    }

    OrderlessTransferContextPool(OrderlessTransferContextPool &&other) noexcept
        : m_Device{other.m_Device}
        , m_TransferContextMemoryPool{std::move(other.m_TransferContextMemoryPool)}
        // NOTE(review): intrusive lists cannot be meaningfully copied — verify
        // eastl::intrusive_list's copy ctor actually carries the nodes over here;
        // a swap may be what is intended.
        , m_FreeTransferContexts{other.m_FreeTransferContexts}
        , m_TransferContexts{other.m_TransferContexts}
        , m_QueueFamilyIndex{other.m_QueueFamilyIndex}
    {
        other.m_FreeTransferContexts.clear();
        other.m_TransferContexts.clear();
    }

    OrderlessTransferContextPool &
    operator=(OrderlessTransferContextPool &&other) noexcept
    {
        if (this == &other)
            return *this;
        m_Device = other.m_Device;
        m_TransferContextMemoryPool = std::move(other.m_TransferContextMemoryPool);
        // NOTE(review): same intrusive_list copy concern as the move constructor.
        m_FreeTransferContexts = other.m_FreeTransferContexts;
        other.m_FreeTransferContexts.clear();
        m_TransferContexts = other.m_TransferContexts;
        other.m_TransferContexts.clear();
        m_QueueFamilyIndex = other.m_QueueFamilyIndex;
        return *this;
    }

    // Destroys the pooled TransferContextPools in place; entry node memory is
    // reclaimed en masse by m_TransferContextMemoryPool's destructor.
    ~OrderlessTransferContextPool()
    {
        for (auto &entry : m_FreeTransferContexts)
        {
            entry.m_Pool.~TransferContextPool();
        }
        for (auto &entry : m_TransferContexts)
        {
            entry.m_Pool.~TransferContextPool();
        }
        // The allocations will 'wink' away.
    }

    DISALLOW_COPY_AND_ASSIGN(OrderlessTransferContextPool);
};
} // namespace _internal
} // namespace systems

View File

@ -5,6 +5,7 @@
#pragma once
#include "context.h"
#include "pipeline_helpers.h"
#include "resource.h"
@ -20,15 +21,13 @@
#include "aster/core/size.h"
#include "aster/core/swapchain.h"
#include "EASTL/deque.h"
#include <EASTL/hash_map.h>
#include <EASTL/optional.h>
#include <EASTL/variant.h>
#include <slang-com-ptr.h>
#include <slang.h>
#include <variant>
constexpr static u32 MAX_FRAMES_IN_FLIGHT = 3;
struct Window;
@ -343,127 +342,34 @@ class Receipt
friend _internal::SyncServer;
};
class Device;
struct Frame;
#define DEPRECATE_RAW_CALLS
class Context
{
protected:
vk::CommandBuffer m_Cmd;
friend Device;
friend Frame;
explicit Context(const vk::CommandBuffer cmd)
: m_Cmd{cmd}
{
}
public:
DEPRECATE_RAW_CALLS void Dependency(const vk::DependencyInfo &dependencyInfo);
void Begin();
void End();
};
class GraphicsContext : public Context
{
protected:
friend Device;
friend Frame;
const Pipeline *m_PipelineInUse;
explicit GraphicsContext(const vk::CommandBuffer cmd)
: Context{cmd}
, m_PipelineInUse{nullptr}
{
}
public:
DEPRECATE_RAW_CALLS void SetViewport(const vk::Viewport &viewport);
void BindVertexBuffer(const Ref<VertexBuffer> &vertexBuffer);
void BindPipeline(const Pipeline &pipeline);
void
PushConstantBlock(auto &block)
{
if constexpr (sizeof(block) > 128)
{
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof block);
}
m_Cmd.pushConstants(m_PipelineInUse->m_Layout, vk::ShaderStageFlagBits::eAll, 0, sizeof block, &block);
}
void Draw(usize vertexCount);
void DrawIndexed(usize indexCount);
DEPRECATE_RAW_CALLS void BeginRendering(const vk::RenderingInfo &renderingInfo);
void EndRendering();
};
class TransferContext : public Context
{
protected:
friend Device;
friend Frame;
Device *m_Device;
explicit TransferContext(Device &device, const vk::CommandBuffer cmd)
: Context{cmd}
, m_Device{&device}
{
}
eastl::vector<Ref<Buffer>> m_OwnedBuffers;
eastl::vector<Ref<Image>> m_OwnedImages;
void Reset();
public:
struct ImageData
{
void *m_Data;
usize m_NumBytes;
};
void UploadTexture(const Ref<Image> &image, const ImageData &data);
TransferContext(TransferContext &&other) noexcept;
TransferContext &operator=(TransferContext &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(TransferContext);
~TransferContext() = default;
};
struct Frame
{
// Persistent
Device *m_Device;
// TODO: ThreadSafe
vk::CommandPool m_Pool;
_internal::GraphicsContextPool m_PrimaryPool;
_internal::TransferContextPool m_AsyncTransferPool;
_internal::ContextPool m_AsyncComputePool;
vk::Fence m_FrameAvailableFence;
vk::Semaphore m_ImageAcquireSem;
vk::Semaphore m_RenderFinishSem;
u32 m_FrameIdx;
eastl::vector<vk::CommandBuffer> m_CommandBuffers;
// Transient
vk::Image m_SwapchainImage;
vk::ImageView m_SwapchainImageView;
Size2D m_SwapchainSize;
u32 m_ImageIdx;
u32 m_CommandBuffersAllocated;
void Reset(u32 imageIdx, vk::Image swapchainImage, vk::ImageView swapchainImageView, Size2D swapchainSize);
GraphicsContext CreateGraphicsContext();
TransferContext CreateTransferContext();
TransferContext CreateAsyncTransferContext();
void WaitUntilReady();
Frame() = default;
Frame(Device &device, u32 primaryQueueFamily, u32 frameIndex);
Frame(Device &device, u32 frameIndex, u32 primaryQueueFamily, u32 asyncTransferQueue, u32 asyncComputeQueue);
Frame(Frame &&other) noexcept;
Frame &operator=(Frame &&other) noexcept;
@ -486,12 +392,17 @@ class Device final
std::unique_ptr<CommitManager> m_CommitManager;
// TODO: This is single-threaded.
vk::Queue m_GraphicsQueue;
vk::Queue m_PrimaryQueue;
u32 m_PrimaryQueueFamily;
vk::Queue m_TransferQueue;
u32 m_TransferQueueFamily;
vk::Queue m_ComputeQueue;
u32 m_ComputeQueueFamily;
_internal::OrderlessTransferContextPool m_TransferContextPool;
std::array<Frame, MAX_FRAMES_IN_FLIGHT> m_Frames;
u32 m_CurrentFrameIdx = 0;
@ -603,9 +514,6 @@ class Device final
// Frames
// ----------------------------------------------------------------------------------------------------
private:
Frame CreateFrame(u32 frameIndex);
public:
Frame &GetNextFrame();
Size2D
@ -623,11 +531,7 @@ class Device final
friend GraphicsContext;
friend TransferContext;
vk::CommandPool m_TransferPool;
eastl::deque<TransferContext> m_TransferContexts;
eastl::vector<u32> m_TransferContextFreeList;
TransferContext &CreateTransferContext();
TransferContext CreateTransferContext();
Receipt Submit(Context &context);
//

View File

@ -41,7 +41,7 @@ CastImage(const Ref<TFrom> &from)
{
if constexpr (not concepts::ImageInto<TFrom, TTo>)
assert(TTo::FLAGS & from->m_Flags_);
return std::reinterpret_pointer_cast<TTo>(from);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion
@ -54,7 +54,7 @@ CastView(const Ref<View<TFrom>> &from)
{
if constexpr (not concepts::ImageInto<TFrom, typename TTo::ImageType>)
assert(TTo::ImageType::FLAGS & from->m_Image->m_Flags_);
return std::reinterpret_pointer_cast<TTo>(from);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion

View File

@ -4,6 +4,7 @@
// =============================================
#include "aster/aster.h"
#include "context.h"
#include <EASTL/deque.h>
#include <EASTL/intrusive_list.h>
@ -28,11 +29,20 @@ class SyncServer
{
vk::Semaphore m_Semaphore;
TimelinePoint m_CurrentPoint;
ContextPool *m_AttachedPool;
static Entry Create(Device &device);
explicit Entry(Device &device);
void Destroy(Device &device);
void Wait(Device &device);
void Next();
void AttachPool(ContextPool *pool);
Entry(Entry &&) = default;
Entry &operator=(Entry &&) = default;
~Entry() = default;
DISALLOW_COPY_AND_ASSIGN(Entry);
};
Device *m_Device;

View File

@ -7,4 +7,5 @@ PRIVATE
"device.cpp"
"commit_manager.cpp"
"pipeline_helpers.cpp"
"context.cpp"
"sync_server.cpp")

View File

@ -0,0 +1,402 @@
// =============================================
// Aster: context.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/systems/context.h"
#include "aster/systems/commit_manager.h"
#include "systems/device.h"
/// Returns the size in bytes of one texel for common uncompressed formats
/// (used to validate tightly-packed upload sizes). Aborts via TODO() for
/// formats not listed here.
/// NOTE(review): the combined depth/stencil sizes below (D16S8 -> 6, D32S8 -> 5)
/// do not match tight per-aspect packing — confirm they match how callers
/// compute upload sizes for these formats.
constexpr static u32
GetFormatSize(const vk::Format format)
{
    switch (format)
    {
    case vk::Format::eUndefined:
        return 0;
    // 8-bit single channel.
    case vk::Format::eR8Unorm:
    case vk::Format::eR8Snorm:
    case vk::Format::eR8Uscaled:
    case vk::Format::eR8Sscaled:
    case vk::Format::eR8Uint:
    case vk::Format::eR8Sint:
    case vk::Format::eR8Srgb:
        return 1;
    // 8-bit two channel.
    case vk::Format::eR8G8Unorm:
    case vk::Format::eR8G8Snorm:
    case vk::Format::eR8G8Uscaled:
    case vk::Format::eR8G8Sscaled:
    case vk::Format::eR8G8Uint:
    case vk::Format::eR8G8Sint:
    case vk::Format::eR8G8Srgb:
        return 2;
    // 8-bit three channel (RGB/BGR).
    case vk::Format::eR8G8B8Unorm:
    case vk::Format::eR8G8B8Snorm:
    case vk::Format::eR8G8B8Uscaled:
    case vk::Format::eR8G8B8Sscaled:
    case vk::Format::eR8G8B8Uint:
    case vk::Format::eR8G8B8Sint:
    case vk::Format::eR8G8B8Srgb:
    case vk::Format::eB8G8R8Unorm:
    case vk::Format::eB8G8R8Snorm:
    case vk::Format::eB8G8R8Uscaled:
    case vk::Format::eB8G8R8Sscaled:
    case vk::Format::eB8G8R8Uint:
    case vk::Format::eB8G8R8Sint:
    case vk::Format::eB8G8R8Srgb:
        return 3;
    // 8-bit four channel (RGBA/BGRA).
    case vk::Format::eR8G8B8A8Unorm:
    case vk::Format::eR8G8B8A8Snorm:
    case vk::Format::eR8G8B8A8Uscaled:
    case vk::Format::eR8G8B8A8Sscaled:
    case vk::Format::eR8G8B8A8Uint:
    case vk::Format::eR8G8B8A8Sint:
    case vk::Format::eR8G8B8A8Srgb:
    case vk::Format::eB8G8R8A8Unorm:
    case vk::Format::eB8G8R8A8Snorm:
    case vk::Format::eB8G8R8A8Uscaled:
    case vk::Format::eB8G8R8A8Sscaled:
    case vk::Format::eB8G8R8A8Uint:
    case vk::Format::eB8G8R8A8Sint:
    case vk::Format::eB8G8R8A8Srgb:
        return 4;
    // 16-bit per channel.
    case vk::Format::eR16Unorm:
    case vk::Format::eR16Snorm:
    case vk::Format::eR16Uscaled:
    case vk::Format::eR16Sscaled:
    case vk::Format::eR16Uint:
    case vk::Format::eR16Sint:
    case vk::Format::eR16Sfloat:
        return 2;
    case vk::Format::eR16G16Unorm:
    case vk::Format::eR16G16Snorm:
    case vk::Format::eR16G16Uscaled:
    case vk::Format::eR16G16Sscaled:
    case vk::Format::eR16G16Uint:
    case vk::Format::eR16G16Sint:
    case vk::Format::eR16G16Sfloat:
        return 4;
    case vk::Format::eR16G16B16Unorm:
    case vk::Format::eR16G16B16Snorm:
    case vk::Format::eR16G16B16Uscaled:
    case vk::Format::eR16G16B16Sscaled:
    case vk::Format::eR16G16B16Uint:
    case vk::Format::eR16G16B16Sint:
    case vk::Format::eR16G16B16Sfloat:
        return 6;
    case vk::Format::eR16G16B16A16Unorm:
    case vk::Format::eR16G16B16A16Snorm:
    case vk::Format::eR16G16B16A16Uscaled:
    case vk::Format::eR16G16B16A16Sscaled:
    case vk::Format::eR16G16B16A16Uint:
    case vk::Format::eR16G16B16A16Sint:
    case vk::Format::eR16G16B16A16Sfloat:
        return 8;
    // 32-bit per channel.
    case vk::Format::eR32Uint:
    case vk::Format::eR32Sint:
    case vk::Format::eR32Sfloat:
        return 4;
    case vk::Format::eR32G32Uint:
    case vk::Format::eR32G32Sint:
    case vk::Format::eR32G32Sfloat:
        return 8;
    case vk::Format::eR32G32B32Uint:
    case vk::Format::eR32G32B32Sint:
    case vk::Format::eR32G32B32Sfloat:
        return 12;
    case vk::Format::eR32G32B32A32Uint:
    case vk::Format::eR32G32B32A32Sint:
    case vk::Format::eR32G32B32A32Sfloat:
        return 16;
    // Depth / stencil.
    case vk::Format::eD16Unorm:
        return 2;
    case vk::Format::eD32Sfloat:
        return 4;
    case vk::Format::eS8Uint:
        return 1;
    case vk::Format::eD16UnormS8Uint:
        return 6;
    case vk::Format::eD24UnormS8Uint:
        return 4;
    case vk::Format::eD32SfloatS8Uint:
        return 5;
    default:
        TODO("Esoteric Formats");
    }
    return 0;
}
// Pins the buffer in the owning pool until the pool is reset.
void
systems::Context::KeepAlive(const Ref<Buffer> &buffer)
{
    assert(m_Pool);
    m_Pool->KeepAlive(buffer);
}
// Pins the image in the owning pool until the pool is reset.
void
systems::Context::KeepAlive(const Ref<Image> &image)
{
    assert(m_Pool);
    m_Pool->KeepAlive(image);
}
// Pins the image view in the owning pool until the pool is reset.
void
systems::Context::KeepAlive(const Ref<ImageView> &view)
{
    assert(m_Pool);
    m_Pool->KeepAlive(view);
}
// Records a raw synchronization-2 pipeline barrier into the command buffer.
void
systems::Context::Dependency(const vk::DependencyInfo &dependencyInfo)
{
    m_Cmd.pipelineBarrier2(&dependencyInfo);
}
void
systems::Context::Begin()
{
vk::CommandBufferBeginInfo commandBufferBeginInfo = {
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
};
auto result = m_Cmd.begin(&commandBufferBeginInfo);
ERROR_IF(Failed(result), "Could not begin context") THEN_ABORT(result);
}
// Finishes recording on the command buffer. Aborts on failure.
void
systems::Context::End()
{
    auto result = m_Cmd.end();
    ERROR_IF(Failed(result), "Could not end context") THEN_ABORT(result);
}
// Sets viewport 0 (dynamic state) for subsequent draws.
void
systems::GraphicsContext::SetViewport(const vk::Viewport &viewport)
{
    m_Cmd.setViewport(0, 1, &viewport);
}
// Binds the vertex buffer to binding 0 at offset 0.
void
systems::GraphicsContext::BindVertexBuffer(const Ref<VertexBuffer> &vertexBuffer)
{
    constexpr vk::DeviceSize offset = 0;
    m_Cmd.bindVertexBuffers(0, 1, &vertexBuffer->m_Buffer, &offset);
}
// Binds a graphics pipeline and remembers it for later PushConstantBlock calls.
// Also binds the global bindless descriptor set when the CommitManager is up.
void
systems::GraphicsContext::BindPipeline(const Pipeline &pipeline)
{
    m_Cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
    // TODO: Maybe find a smarter place to host this.
    if (CommitManager::IsInit())
    {
        m_Cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1,
                                 &CommitManager::Instance().GetDescriptorSet(), 0, nullptr);
    }
    m_PipelineInUse = &pipeline;
}
// Non-indexed draw of one instance, starting at vertex 0.
void
systems::GraphicsContext::Draw(const usize vertexCount)
{
    m_Cmd.draw(static_cast<u32>(vertexCount), 1, 0, 0);
}
// Indexed draw of one instance, starting at index 0 with no vertex offset.
void
systems::GraphicsContext::DrawIndexed(usize indexCount)
{
    m_Cmd.drawIndexed(static_cast<u32>(indexCount), 1, 0, 0, 0);
}
// Begins dynamic rendering and sets the scissor to cover the full render area.
void
systems::GraphicsContext::BeginRendering(const vk::RenderingInfo &renderingInfo)
{
    m_Cmd.beginRendering(&renderingInfo);
    m_Cmd.setScissor(0, 1, &renderingInfo.renderArea);
}
// Ends the dynamic rendering pass started by BeginRendering.
void
systems::GraphicsContext::EndRendering()
{
    m_Cmd.endRendering();
}
// Records a staged upload of tightly-packed texel data into `image`:
// validates the payload size, writes it into a fresh staging buffer, records
// the buffer->image copy, and pins both resources until the pool resets.
void
systems::TransferContext::UploadTexture(const Ref<Image> &image, const ImageData &data)
{
    ERROR_IF(not(image and image->IsValid()), "Invalid image");
    auto [w, h, d] = image->m_Extent;
    auto formatSize = GetFormatSize(image->m_Format);
    // Expected size assumes tight packing: width * height * depth * bytes-per-texel.
    auto expectedByteSize = static_cast<u64>(w) * static_cast<u64>(h) * static_cast<u64>(d) * formatSize;
    ERROR_IF(expectedByteSize != data.m_NumBytes, "Mismatch in data size {} vs image size {} ({}x{}x{}x{})",
             data.m_NumBytes, expectedByteSize, w, h, d, formatSize);
    const Ref<StagingBuffer> stagingBuffer = m_Pool->GetDevice().CreateStagingBuffer(data.m_NumBytes);
    stagingBuffer->Write(0, data.m_NumBytes, data.m_Data);
    const vk::BufferImageCopy bufferImageCopy = {
        .bufferOffset = 0,
        .bufferRowLength = w,
        .bufferImageHeight = h,
        // NOTE(review): only color aspect, mip 0, layer 0 are written — confirm
        // callers never need array layers or mip chains uploaded here.
        .imageSubresource =
            {
                .aspectMask = vk::ImageAspectFlagBits::eColor,
                .mipLevel = 0,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        .imageOffset = {},
        .imageExtent = image->m_Extent,
    };
    // Assumes the image is already in eTransferDstOptimal — TODO confirm callers
    // transition the layout before calling this.
    m_Cmd.copyBufferToImage(stagingBuffer->m_Buffer, image->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
                            &bufferImageCopy);
    // Keep both alive until the GPU has consumed the copy.
    KeepAlive(stagingBuffer);
    KeepAlive(image);
}
// Move construction just transfers the base's pool pointer and command buffer.
systems::TransferContext::TransferContext(TransferContext &&other) noexcept
    : Context{std::move(other)}
{
}
// Move assignment delegates to the base; self-assignment is a no-op.
systems::TransferContext &
systems::TransferContext::operator=(TransferContext &&other) noexcept
{
    if (this == &other)
        return *this;
    Context::operator=(std::move(other));
    return *this;
}
using namespace systems::_internal;
// Creates the vk::CommandPool for the given queue family. eTransient hints the
// driver that buffers are short-lived; buffers are recycled by whole-pool reset
// (no per-buffer reset flag). Aborts on failure.
ContextPool::ContextPool(Device &device, const u32 queueFamilyIndex, const ManagedBy managedBy)
    : m_Device{&device}
    , m_BuffersAllocated{0}
    , m_ExtraData{0}
    , m_ManagedBy{managedBy}
    , m_ResetCallback{}
{
    const vk::CommandPoolCreateInfo commandPoolCreateInfo = {
        .flags = vk::CommandPoolCreateFlagBits::eTransient,
        .queueFamilyIndex = queueFamilyIndex,
    };
    AbortIfFailed(device.m_Device->createCommandPool(&commandPoolCreateInfo, nullptr, &m_Pool));
}
// Move construction: Take() nulls the source's pool handle so its destructor
// skips destruction; everything else is moved or copied.
ContextPool::ContextPool(ContextPool &&other) noexcept
    : m_Device{other.m_Device}
    , m_Pool{Take(other.m_Pool)}
    , m_CommandBuffers{std::move(other.m_CommandBuffers)}
    , m_BuffersAllocated{other.m_BuffersAllocated}
    , m_ExtraData{other.m_ExtraData}
    , m_ManagedBy{other.m_ManagedBy}
    , m_OwnedBuffers{std::move(other.m_OwnedBuffers)}
    , m_OwnedImages{std::move(other.m_OwnedImages)}
    , m_OwnedImageViews{std::move(other.m_OwnedImageViews)}
    , m_ResetCallback{std::move(other.m_ResetCallback)}
{
}
// Swap-based move assignment: our previous resources end up in `other`, whose
// destructor releases them.
ContextPool &
ContextPool::operator=(ContextPool &&other) noexcept
{
    if (this == &other)
        return *this;
    using eastl::swap;
    swap(m_Device, other.m_Device);
    swap(m_Pool, other.m_Pool);
    swap(m_CommandBuffers, other.m_CommandBuffers);
    swap(m_ExtraData, other.m_ExtraData);
    swap(m_ManagedBy, other.m_ManagedBy);
    swap(m_BuffersAllocated, other.m_BuffersAllocated);
    swap(m_OwnedBuffers, other.m_OwnedBuffers);
    swap(m_OwnedImages, other.m_OwnedImages);
    swap(m_OwnedImageViews, other.m_OwnedImageViews);
    swap(m_ResetCallback, other.m_ResetCallback);
    return *this;
}
// Destroys the vk::CommandPool (which frees all its command buffers).
// A null handle means the pool was moved from or default-constructed.
ContextPool::~ContextPool()
{
    if (!m_Pool)
        return;
    m_Device->m_Device->destroy(Take(m_Pool), nullptr);
}
// Holds a reference to the buffer until Reset() clears the owned lists.
void
ContextPool::KeepAlive(const Ref<Buffer> &buffer)
{
    m_OwnedBuffers.push_back(buffer);
}
// Holds a reference to the image until Reset() clears the owned lists.
void
ContextPool::KeepAlive(const Ref<Image> &image)
{
    m_OwnedImages.push_back(image);
}
// Holds a reference to the image view until Reset() clears the owned lists.
void
ContextPool::KeepAlive(const Ref<ImageView> &view)
{
    m_OwnedImageViews.push_back(view);
}
// Hands out the next command buffer, reusing one recycled by Reset() when
// available and allocating a fresh one from the vk pool otherwise.
// Aborts if allocation fails.
vk::CommandBuffer
ContextPool::AllocateCommandBuffer()
{
    // Buffers are available.
    if (m_BuffersAllocated < m_CommandBuffers.size())
    {
        return m_CommandBuffers[m_BuffersAllocated++];
    }
    // Allocate New Buffer.
    const vk::CommandBufferAllocateInfo allocateInfo = {
        .commandPool = m_Pool,
        .level = vk::CommandBufferLevel::ePrimary,
        .commandBufferCount = 1,
    };
    vk::CommandBuffer &cmd = m_CommandBuffers.emplace_back();
    AbortIfFailed(m_Device->m_Device->allocateCommandBuffers(&allocateInfo, &cmd));
    // BUGFIX: count the freshly allocated buffer as handed out. Without this,
    // emplace_back grows the vector while m_BuffersAllocated stays behind, so the
    // very next call takes the reuse branch and returns this SAME command buffer
    // again — two live contexts recording into one vk::CommandBuffer.
    ++m_BuffersAllocated;
    return cmd;
}
// Wraps the next available command buffer in a base Context.
systems::Context
ContextPool::CreateContext()
{
    return Context{*this, AllocateCommandBuffer()};
}
// Recycles the pool for a new cycle: resets every command buffer via the vk
// pool in one call, releases the pinned keep-alive resources, and rewinds the
// hand-out counter so AllocateCommandBuffer reuses the existing buffers.
void
ContextPool::Reset()
{
    assert(m_Pool);
    AbortIfFailed(m_Device->m_Device->resetCommandPool(m_Pool, {}));
    // Drop the references held since the last reset; GPU work is done.
    m_OwnedBuffers.clear();
    m_OwnedImages.clear();
    m_OwnedImageViews.clear();
    m_BuffersAllocated = 0;
}
// Wraps the next available command buffer in a TransferContext.
systems::TransferContext
TransferContextPool::CreateTransferContext()
{
    return TransferContext{*this, AllocateCommandBuffer()};
}
// Wraps the next available command buffer in a GraphicsContext.
systems::GraphicsContext
GraphicsContextPool::CreateGraphicsContext()
{
    return GraphicsContext{*this, AllocateCommandBuffer()};
}

View File

@ -12,6 +12,7 @@
#include "aster/systems/sync_server.h"
#include "aster/util/files.h"
#include "systems/commit_manager.h"
#include "systems/context.h"
#include <EASTL/vector_map.h>
#include <fmt/ranges.h>
@ -20,126 +21,6 @@ static constexpr QueueSupportFlags REQUIRED_QUEUE_SUPPORT =
QueueSupportFlags{} | QueueSupportFlagBits::eGraphics | QueueSupportFlagBits::eCompute |
QueueSupportFlagBits::ePresent | QueueSupportFlagBits::eTransfer;
constexpr static u32
GetFormatSize(const vk::Format format)
{
switch (format)
{
case vk::Format::eUndefined:
return 0;
case vk::Format::eR8Unorm:
case vk::Format::eR8Snorm:
case vk::Format::eR8Uscaled:
case vk::Format::eR8Sscaled:
case vk::Format::eR8Uint:
case vk::Format::eR8Sint:
case vk::Format::eR8Srgb:
return 1;
case vk::Format::eR8G8Unorm:
case vk::Format::eR8G8Snorm:
case vk::Format::eR8G8Uscaled:
case vk::Format::eR8G8Sscaled:
case vk::Format::eR8G8Uint:
case vk::Format::eR8G8Sint:
case vk::Format::eR8G8Srgb:
return 2;
case vk::Format::eR8G8B8Unorm:
case vk::Format::eR8G8B8Snorm:
case vk::Format::eR8G8B8Uscaled:
case vk::Format::eR8G8B8Sscaled:
case vk::Format::eR8G8B8Uint:
case vk::Format::eR8G8B8Sint:
case vk::Format::eR8G8B8Srgb:
case vk::Format::eB8G8R8Unorm:
case vk::Format::eB8G8R8Snorm:
case vk::Format::eB8G8R8Uscaled:
case vk::Format::eB8G8R8Sscaled:
case vk::Format::eB8G8R8Uint:
case vk::Format::eB8G8R8Sint:
case vk::Format::eB8G8R8Srgb:
return 3;
case vk::Format::eR8G8B8A8Unorm:
case vk::Format::eR8G8B8A8Snorm:
case vk::Format::eR8G8B8A8Uscaled:
case vk::Format::eR8G8B8A8Sscaled:
case vk::Format::eR8G8B8A8Uint:
case vk::Format::eR8G8B8A8Sint:
case vk::Format::eR8G8B8A8Srgb:
case vk::Format::eB8G8R8A8Unorm:
case vk::Format::eB8G8R8A8Snorm:
case vk::Format::eB8G8R8A8Uscaled:
case vk::Format::eB8G8R8A8Sscaled:
case vk::Format::eB8G8R8A8Uint:
case vk::Format::eB8G8R8A8Sint:
case vk::Format::eB8G8R8A8Srgb:
return 4;
case vk::Format::eR16Unorm:
case vk::Format::eR16Snorm:
case vk::Format::eR16Uscaled:
case vk::Format::eR16Sscaled:
case vk::Format::eR16Uint:
case vk::Format::eR16Sint:
case vk::Format::eR16Sfloat:
return 2;
case vk::Format::eR16G16Unorm:
case vk::Format::eR16G16Snorm:
case vk::Format::eR16G16Uscaled:
case vk::Format::eR16G16Sscaled:
case vk::Format::eR16G16Uint:
case vk::Format::eR16G16Sint:
case vk::Format::eR16G16Sfloat:
return 4;
case vk::Format::eR16G16B16Unorm:
case vk::Format::eR16G16B16Snorm:
case vk::Format::eR16G16B16Uscaled:
case vk::Format::eR16G16B16Sscaled:
case vk::Format::eR16G16B16Uint:
case vk::Format::eR16G16B16Sint:
case vk::Format::eR16G16B16Sfloat:
return 6;
case vk::Format::eR16G16B16A16Unorm:
case vk::Format::eR16G16B16A16Snorm:
case vk::Format::eR16G16B16A16Uscaled:
case vk::Format::eR16G16B16A16Sscaled:
case vk::Format::eR16G16B16A16Uint:
case vk::Format::eR16G16B16A16Sint:
case vk::Format::eR16G16B16A16Sfloat:
return 8;
case vk::Format::eR32Uint:
case vk::Format::eR32Sint:
case vk::Format::eR32Sfloat:
return 4;
case vk::Format::eR32G32Uint:
case vk::Format::eR32G32Sint:
case vk::Format::eR32G32Sfloat:
return 8;
case vk::Format::eR32G32B32Uint:
case vk::Format::eR32G32B32Sint:
case vk::Format::eR32G32B32Sfloat:
return 12;
case vk::Format::eR32G32B32A32Uint:
case vk::Format::eR32G32B32A32Sint:
case vk::Format::eR32G32B32A32Sfloat:
return 16;
case vk::Format::eD16Unorm:
return 2;
case vk::Format::eD32Sfloat:
return 4;
case vk::Format::eS8Uint:
return 1;
case vk::Format::eD16UnormS8Uint:
return 6;
case vk::Format::eD24UnormS8Uint:
return 4;
case vk::Format::eD32SfloatS8Uint:
return 5;
default:
TODO("Esoteric Formats");
}
return 0;
}
PhysicalDevice
systems::DefaultPhysicalDeviceSelector(const PhysicalDevices &physicalDevices)
{
@ -182,7 +63,7 @@ systems::Device::CreateStorageBuffer(const usize size, const cstr name)
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
return std::make_shared<StorageBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
return eastl::make_shared<StorageBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
}
Ref<UniformBuffer>
@ -193,7 +74,7 @@ systems::Device::CreateUniformBuffer(const usize size, const cstr name)
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
return std::make_shared<UniformBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
return eastl::make_shared<UniformBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
}
Ref<StagingBuffer>
@ -203,7 +84,7 @@ systems::Device::CreateStagingBuffer(const usize size, const cstr name)
constexpr VmaAllocationCreateFlags createFlags =
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
return std::make_shared<StagingBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
return eastl::make_shared<StagingBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
}
Ref<VertexBuffer>
@ -213,7 +94,7 @@ systems::Device::CreateVertexBuffer(const usize size, const cstr name)
constexpr VmaAllocationCreateFlags createFlags =
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
return std::make_shared<VertexBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
return eastl::make_shared<VertexBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
}
#pragma endregion
@ -265,8 +146,8 @@ systems::Device::CreateTexture2D(const Texture2DCreateInfo &createInfo)
m_Device.SetName(image, createInfo.m_Name);
return std::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format, flags,
layerCount, mipLevels);
return eastl::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
flags, layerCount, mipLevels);
}
Ref<ImageCube>
@ -297,7 +178,7 @@ systems::Device::CreateTextureCube(const TextureCubeCreateInfo &createInfo)
m_Device.SetName(image, createInfo.m_Name);
return CastImage<ImageCube>(std::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent,
return CastImage<ImageCube>(eastl::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent,
imageCreateInfo.format, flags, layerCount, mipLevels));
}
@ -324,7 +205,7 @@ systems::Device::CreateAttachment(const AttachmentCreateInfo &createInfo)
m_Device.SetName(image, createInfo.m_Name);
return std::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
return eastl::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
Image::Flags{}, layerCount, mipLevels);
}
@ -351,7 +232,7 @@ systems::Device::CreateDepthStencilImage(const DepthStencilImageCreateInfo &crea
m_Device.SetName(image, createInfo.m_Name);
return std::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
return eastl::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
Image::Flags{}, layerCount, mipLevels);
}
@ -470,7 +351,7 @@ systems::Device::CreateView(const ViewCreateInfo<Image> &createInfo)
m_Device.SetName(view, createInfo.m_Name);
return std::make_shared<ImageView>(createInfo.m_Image, view, createInfo.m_Image->m_Extent, createInfo.m_BaseLayer,
return eastl::make_shared<ImageView>(createInfo.m_Image, view, createInfo.m_Image->m_Extent, createInfo.m_BaseLayer,
layerCount, createInfo.m_BaseMipLevel, mipCount);
}
@ -541,7 +422,7 @@ systems::Device::CreateSampler(const SamplerCreateInfo &createInfo)
return iter->second.lock();
}
auto object = std::make_shared<Sampler>(&m_Device, vkCreateInfo, createInfo.m_Name ? createInfo.m_Name : nullptr);
auto object = eastl::make_shared<Sampler>(&m_Device, vkCreateInfo, createInfo.m_Name ? createInfo.m_Name : nullptr);
m_HashToSamplerIdx.emplace(vkCreateInfo, object);
return object;
@ -1029,14 +910,27 @@ systems::Device::Device(const DeviceCreateInfo &createInfo)
m_TransferQueueFamily = m_PrimaryQueueFamily;
}
// TODO: Async Compute
u32 computeQueueIndex;
if (const auto asyncCompute = FindAsyncComputeQueue(physicalDevice, m_PrimaryQueueFamily))
{
const QueueAllocation allocation = asyncCompute.value();
queueAllocations.push_back(allocation);
m_ComputeQueueFamily = allocation.m_Family;
computeQueueIndex = 0;
}
else
{
computeQueueIndex = primaryQueue.m_Count;
++primaryQueue.m_Count;
m_ComputeQueueFamily = m_PrimaryQueueFamily;
}
m_Device = ::Device{m_Instance, physicalDevice, features, queueAllocations, createInfo.m_PipelineCacheData,
createInfo.m_Name};
m_GraphicsQueue = m_Device.GetQueue(m_PrimaryQueueFamily, primaryQueueIndex);
m_PrimaryQueue = m_Device.GetQueue(m_PrimaryQueueFamily, primaryQueueIndex);
m_TransferQueue = m_Device.GetQueue(m_TransferQueueFamily, transferQueueIndex);
m_ComputeQueue = m_Device.GetQueue(m_ComputeQueueFamily, computeQueueIndex);
m_Swapchain = Swapchain{m_Surface, m_Device, m_Window.get().GetSize()};
@ -1045,11 +939,7 @@ systems::Device::Device(const DeviceCreateInfo &createInfo)
m_CommitManager = std::make_unique<CommitManager>(this, 1000, 1000, 1000, CreateSampler({}));
}
const vk::CommandPoolCreateInfo transferPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient | vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
.queueFamilyIndex = m_TransferQueueFamily,
};
AbortIfFailed(m_Device->createCommandPool(&transferPoolCreateInfo, nullptr, &m_TransferPool));
m_TransferContextPool.Init(*this, m_TransferQueueFamily);
constexpr SlangGlobalSessionDesc globalSessionDesc = {};
auto result = slang::createGlobalSession(&globalSessionDesc, m_GlobalSlangSession.writeRef());
@ -1091,7 +981,7 @@ systems::Device::Device(const DeviceCreateInfo &createInfo)
u32 index = 0;
for (auto &frame : m_Frames)
{
frame = Frame(*this, m_PrimaryQueueFamily, index++);
frame = Frame(*this, index++, m_PrimaryQueueFamily, m_TransferQueueFamily, m_ComputeQueueFamily);
}
}
@ -1102,10 +992,7 @@ systems::Device::~Device()
m_Device->destroy(Take(frame.m_FrameAvailableFence), nullptr);
m_Device->destroy(Take(frame.m_ImageAcquireSem), nullptr);
m_Device->destroy(Take(frame.m_RenderFinishSem), nullptr);
m_Device->destroy(Take(frame.m_Pool), nullptr);
}
m_Device->destroy(Take(m_TransferPool), nullptr);
m_TransferContexts.clear();
m_HashToSamplerIdx.clear();
}
@ -1164,7 +1051,7 @@ systems::Device::Present(Frame &frame, GraphicsContext &graphicsContext)
.signalSemaphoreCount = 1,
.pSignalSemaphores = &frame.m_RenderFinishSem,
};
vk::Result result = m_GraphicsQueue.submit(1, &submitInfo, frame.m_FrameAvailableFence);
vk::Result result = m_PrimaryQueue.submit(1, &submitInfo, frame.m_FrameAvailableFence);
ERROR_IF(Failed(result), "Command queue submit failed. Cause: {}", result)
THEN_ABORT(result);
@ -1176,7 +1063,7 @@ systems::Device::Present(Frame &frame, GraphicsContext &graphicsContext)
.pImageIndices = &frame.m_ImageIdx,
.pResults = nullptr,
};
switch (result = m_GraphicsQueue.presentKHR(&presentInfo))
switch (result = m_PrimaryQueue.presentKHR(&presentInfo))
{
case vk::Result::eSuccess:
break;
@ -1190,36 +1077,20 @@ systems::Device::Present(Frame &frame, GraphicsContext &graphicsContext)
}
}
systems::TransferContext &
systems::TransferContext
systems::Device::CreateTransferContext()
{
if (!m_TransferContextFreeList.empty())
{
u32 freeIndex = m_TransferContextFreeList.back();
m_TransferContextFreeList.pop_back();
TransferContext &context = m_TransferContexts[freeIndex];
return context;
}
vk::CommandBuffer cmd;
vk::CommandBufferAllocateInfo allocateInfo = {
.commandPool = m_TransferPool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailed(m_Device->allocateCommandBuffers(&allocateInfo, &cmd));
m_TransferContexts.push_back(TransferContext{*this, cmd});
return m_TransferContexts.back();
return m_TransferContextPool.CreateTransferContext();
}
systems::Receipt
systems::Device::Submit(Context &context)
{
const auto rect = m_SyncServer->Allocate();
auto entry = m_SyncServer->GetEntry(rect);
auto &entry = m_SyncServer->GetEntry(rect);
if (context.m_Pool->m_ManagedBy == _internal::ContextPool::ManagedBy::eDevice)
entry.AttachPool(context.m_Pool);
auto [wait, next] = entry.m_CurrentPoint;
vk::TimelineSemaphoreSubmitInfo timelineSubmit = {
@ -1241,7 +1112,7 @@ systems::Device::Submit(Context &context)
.signalSemaphoreCount = 1,
.pSignalSemaphores = &entry.m_Semaphore,
};
vk::Result result = m_GraphicsQueue.submit(1, &submitInfo, {});
vk::Result result = m_PrimaryQueue.submit(1, &submitInfo, {});
ERROR_IF(Failed(result), "Command queue submit failed. Cause: {}", result)
THEN_ABORT(result);
@ -1259,8 +1130,9 @@ systems::Frame::Reset(u32 imageIdx, vk::Image swapchainImage, vk::ImageView swap
{
AbortIfFailedMV(m_Device->m_Device->resetFences(1, &m_FrameAvailableFence), "Fence {} reset failed.", m_FrameIdx);
AbortIfFailedMV(m_Device->m_Device->resetCommandPool(m_Pool, {}), "Command pool {} reset failed.", m_FrameIdx);
m_CommandBuffersAllocated = 0;
m_PrimaryPool.Reset();
m_AsyncTransferPool.Reset();
m_AsyncComputePool.Reset();
m_ImageIdx = imageIdx;
m_SwapchainImage = swapchainImage;
@ -1271,47 +1143,13 @@ systems::Frame::Reset(u32 imageIdx, vk::Image swapchainImage, vk::ImageView swap
systems::GraphicsContext
systems::Frame::CreateGraphicsContext()
{
vk::CommandBuffer cmd;
if (m_CommandBuffers.size() > m_CommandBuffersAllocated)
{
cmd = m_CommandBuffers[m_CommandBuffersAllocated++];
}
else
{
const vk::CommandBufferAllocateInfo allocateInfo{
.commandPool = m_Pool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailedMV(m_Device->m_Device->allocateCommandBuffers(&allocateInfo, &cmd),
"Command buffer {} alloc failed.", m_FrameIdx);
m_CommandBuffers.push_back(cmd);
}
return GraphicsContext{cmd};
return m_PrimaryPool.CreateGraphicsContext();
}
systems::TransferContext
systems::Frame::CreateTransferContext()
systems::Frame::CreateAsyncTransferContext()
{
vk::CommandBuffer cmd;
if (m_CommandBuffers.size() > m_CommandBuffersAllocated)
{
cmd = m_CommandBuffers[m_CommandBuffersAllocated++];
}
else
{
const vk::CommandBufferAllocateInfo allocateInfo{
.commandPool = m_Pool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailedMV(m_Device->m_Device->allocateCommandBuffers(&allocateInfo, &cmd),
"Command buffer {} alloc failed.", m_FrameIdx);
m_CommandBuffers.push_back(cmd);
}
return TransferContext{*m_Device, cmd};
return m_AsyncTransferPool.CreateTransferContext();
}
void
@ -1321,21 +1159,38 @@ systems::Frame::WaitUntilReady()
"Waiting for fence {} failed.", m_FrameIdx);
}
systems::Frame::Frame(Device &device, u32 primaryQueueFamily, u32 frameIndex)
// Move-assigns a frame: steals the context pools, sync primitives, and
// swapchain bookkeeping from `other`, using Take() so `other` no longer
// aliases any handle.
// NOTE(review): handles this frame already owned (fence, semaphores) are
// overwritten without being destroyed — safe only when the target is a
// default-constructed or moved-from frame; confirm no live frame is ever a
// move-assignment target.
systems::Frame &
systems::Frame::operator=(Frame &&other) noexcept
{
if (this == &other)
return *this;
m_Device = other.m_Device;
m_PrimaryPool = Take(other.m_PrimaryPool);
m_AsyncTransferPool = Take(other.m_AsyncTransferPool);
m_AsyncComputePool = Take(other.m_AsyncComputePool);
m_FrameAvailableFence = Take(other.m_FrameAvailableFence);
m_ImageAcquireSem = Take(other.m_ImageAcquireSem);
m_RenderFinishSem = Take(other.m_RenderFinishSem);
m_FrameIdx = other.m_FrameIdx;
m_SwapchainImage = Take(other.m_SwapchainImage);
m_SwapchainImageView = Take(other.m_SwapchainImageView);
m_SwapchainSize = Take(other.m_SwapchainSize);
m_ImageIdx = other.m_ImageIdx;
return *this;
}
systems::Frame::Frame(Device &device, u32 frameIndex, u32 const primaryQueueFamily, u32 const asyncTransferQueue,
u32 const asyncComputeQueue)
: m_Device{&device}
, m_PrimaryPool{device, primaryQueueFamily, _internal::ContextPool::ManagedBy::eFrame}
, m_AsyncTransferPool{device, asyncTransferQueue, _internal::ContextPool::ManagedBy::eFrame}
, m_AsyncComputePool{device, asyncComputeQueue, _internal::ContextPool::ManagedBy::eFrame}
, m_FrameIdx{frameIndex}
, m_ImageIdx{0}
{
NameString name = "Frame ";
name += static_cast<char>(frameIndex + '0');
const vk::CommandPoolCreateInfo commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = primaryQueueFamily,
};
AbortIfFailedMV(device.m_Device->createCommandPool(&commandPoolCreateInfo, nullptr, &m_Pool),
"Could not command pool for frame {}", frameIndex);
constexpr vk::FenceCreateInfo fenceCreateInfo = {.flags = vk::FenceCreateFlagBits::eSignaled};
AbortIfFailedMV(device.m_Device->createFence(&fenceCreateInfo, nullptr, &m_FrameAvailableFence),
"Could not create a fence for frame {}", frameIndex);
@ -1346,7 +1201,6 @@ systems::Frame::Frame(Device &device, u32 primaryQueueFamily, u32 frameIndex)
AbortIfFailedMV(device.m_Device->createSemaphore(&semaphoreCreateInfo, nullptr, &m_RenderFinishSem),
"Could not create RF semaphore for frame {}.", frameIndex);
m_Device->SetName(m_Pool, name.c_str());
m_Device->SetName(m_FrameAvailableFence, name.c_str());
m_Device->SetName(m_ImageAcquireSem, name.c_str());
m_Device->SetName(m_RenderFinishSem, name.c_str());
@ -1354,186 +1208,20 @@ systems::Frame::Frame(Device &device, u32 primaryQueueFamily, u32 frameIndex)
DEBUG("Frame {} created successfully.", frameIndex);
}
// Move-assigns a frame: steals the command pool, per-frame command buffers,
// sync primitives, and swapchain bookkeeping from `other`, using Take() so
// `other` no longer aliases any handle.
// NOTE(review): handles this frame already owned (pool, fence, semaphores)
// are overwritten without being destroyed — safe only when the target is a
// default-constructed or moved-from frame; confirm no live frame is ever a
// move-assignment target.
systems::Frame &
systems::Frame::operator=(Frame &&other) noexcept
{
if (this == &other)
return *this;
m_Device = other.m_Device;
m_Pool = Take(other.m_Pool);
m_FrameAvailableFence = Take(other.m_FrameAvailableFence);
m_ImageAcquireSem = Take(other.m_ImageAcquireSem);
m_RenderFinishSem = Take(other.m_RenderFinishSem);
m_FrameIdx = other.m_FrameIdx;
m_CommandBuffers = Take(other.m_CommandBuffers);
m_SwapchainImage = Take(other.m_SwapchainImage);
m_SwapchainImageView = Take(other.m_SwapchainImageView);
m_SwapchainSize = Take(other.m_SwapchainSize);
m_ImageIdx = other.m_ImageIdx;
// Cursor into m_CommandBuffers marking how many buffers are already handed
// out this frame.
m_CommandBuffersAllocated = other.m_CommandBuffersAllocated;
return *this;
}
systems::Frame::Frame(Frame &&other) noexcept
: m_Device{Take(other.m_Device)}
, m_Pool{Take(other.m_Pool)}
, m_PrimaryPool{std::move(other.m_PrimaryPool)}
, m_AsyncTransferPool{std::move(other.m_AsyncTransferPool)}
, m_AsyncComputePool{std::move(other.m_AsyncComputePool)}
, m_FrameAvailableFence{Take(other.m_FrameAvailableFence)}
, m_ImageAcquireSem{Take(other.m_ImageAcquireSem)}
, m_RenderFinishSem{Take(other.m_RenderFinishSem)}
, m_FrameIdx{other.m_FrameIdx}
, m_CommandBuffers{Take(other.m_CommandBuffers)}
, m_SwapchainImage{Take(other.m_SwapchainImage)}
, m_SwapchainImageView{Take(other.m_SwapchainImageView)}
, m_SwapchainSize{Take(other.m_SwapchainSize)}
, m_ImageIdx{other.m_ImageIdx}
, m_CommandBuffersAllocated{other.m_CommandBuffersAllocated}
{
}
#pragma endregion
// ====================================================================================================
#pragma region Context Impl
// ====================================================================================================
// Records a synchronization2 pipeline barrier into this context's command
// buffer. The dependency description is consumed immediately; nothing is
// retained after the call returns.
void
systems::Context::Dependency(const vk::DependencyInfo &dependencyInfo)
{
m_Cmd.pipelineBarrier2(&dependencyInfo);
}
void
systems::Context::Begin()
{
vk::CommandBufferBeginInfo commandBufferBeginInfo = {
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
};
auto result = m_Cmd.begin(&commandBufferBeginInfo);
ERROR_IF(Failed(result), "Could not begin context") THEN_ABORT(result);
}
void
systems::Context::End()
{
auto result = m_Cmd.end();
ERROR_IF(Failed(result), "Could not end context") THEN_ABORT(result);
}
// Sets the single dynamic viewport (slot 0) used by subsequent draws.
void
systems::GraphicsContext::SetViewport(const vk::Viewport &viewport)
{
m_Cmd.setViewport(0, 1, &viewport);
}
void
systems::GraphicsContext::BindVertexBuffer(const Ref<VertexBuffer> &vertexBuffer)
{
constexpr vk::DeviceSize offset = 0;
m_Cmd.bindVertexBuffers(0, 1, &vertexBuffer->m_Buffer, &offset);
}
void
systems::GraphicsContext::BindPipeline(const Pipeline &pipeline)
{
m_Cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
// TODO: Maybe find a smarter place to host this.
if (CommitManager::IsInit())
{
m_Cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1,
&CommitManager::Instance().GetDescriptorSet(), 0, nullptr);
}
m_PipelineInUse = &pipeline;
}
// Issues a non-indexed draw of a single instance starting at vertex 0.
// NOTE(review): vertexCount is narrowed to u32 for Vulkan; counts beyond u32
// range would silently truncate.
void
systems::GraphicsContext::Draw(const usize vertexCount)
{
m_Cmd.draw(static_cast<u32>(vertexCount), 1, 0, 0);
}
void
systems::GraphicsContext::DrawIndexed(usize indexCount)
{
m_Cmd.drawIndexed(static_cast<u32>(indexCount), 1, 0, 0, 0);
}
// Starts a dynamic-rendering pass. As a convenience, scissor 0 is also set
// to cover the full render area of the pass.
void
systems::GraphicsContext::BeginRendering(const vk::RenderingInfo &renderingInfo)
{
m_Cmd.beginRendering(&renderingInfo);
m_Cmd.setScissor(0, 1, &renderingInfo.renderArea);
}
// Ends the dynamic-rendering pass started by BeginRendering().
void
systems::GraphicsContext::EndRendering()
{
m_Cmd.endRendering();
}
// Returns the context to a recordable state: releases the images and staging
// buffers kept alive for the previously recorded transfers (see
// UploadTexture), then resets the underlying command buffer. Aborts on a
// failed command-buffer reset.
void
systems::TransferContext::Reset()
{
m_OwnedImages.clear();
m_OwnedBuffers.clear();
AbortIfFailed(m_Cmd.reset({}));
}
// Records a full upload of `data` into mip level 0 / array layer 0 of
// `image` through a freshly created staging buffer. Both the staging buffer
// and the destination image are pushed into the m_Owned* lists so they stay
// alive until Reset() releases them.
//
// Checked preconditions: the image is valid and data.m_NumBytes equals the
// tightly packed byte size of the image (extent * texel size).
// NOTE(review): assumes the image is in eTransferDstOptimal layout when the
// copy executes — confirm callers record the layout barrier first.
void
systems::TransferContext::UploadTexture(const Ref<Image> &image, const ImageData &data)
{
ERROR_IF(not(image and image->IsValid()), "Invalid image");
auto [w, h, d] = image->m_Extent;
auto formatSize = GetFormatSize(image->m_Format);
// Tightly packed size the caller's data must match exactly.
auto expectedByteSize = static_cast<u64>(w) * static_cast<u64>(h) * static_cast<u64>(d) * formatSize;
ERROR_IF(expectedByteSize != data.m_NumBytes, "Mismatch in data size {} vs image size {} ({}x{}x{}x{})",
data.m_NumBytes, expectedByteSize, w, h, d, formatSize);
const Ref<StagingBuffer> stagingBuffer = m_Device->CreateStagingBuffer(data.m_NumBytes);
stagingBuffer->Write(0, data.m_NumBytes, data.m_Data);
// Row length / image height equal to the extent describe a tightly packed
// source buffer.
const vk::BufferImageCopy bufferImageCopy = {
.bufferOffset = 0,
.bufferRowLength = w,
.bufferImageHeight = h,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = image->m_Extent,
};
m_Cmd.copyBufferToImage(stagingBuffer->m_Buffer, image->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
&bufferImageCopy);
// Keep the staging buffer and target image alive until the GPU has consumed
// the copy; Reset() drops these references afterwards.
m_OwnedBuffers.push_back(stagingBuffer);
m_OwnedImages.push_back(image);
}
// Move-constructs a transfer context, stealing the command buffer, device
// pointer, and ownership lists from `other`.
//
// Fix: the command handle is now Take()n (matching Frame's move operations
// and the rest of the file's handle-move convention) so the moved-from
// context no longer aliases the live command buffer; previously
// `Context{other.m_Cmd}` left a stale copy behind, risking double use/reset
// through the moved-from object.
systems::TransferContext::TransferContext(TransferContext &&other) noexcept
    : Context{Take(other.m_Cmd)}
    , m_Device{Take(other.m_Device)}
    , m_OwnedBuffers{std::move(other.m_OwnedBuffers)}
    , m_OwnedImages{std::move(other.m_OwnedImages)}
{
}
// Move-assigns a transfer context. Self-assignment is a no-op.
//
// Fix: the command handle is now Take()n (cleared in `other`), matching the
// move constructor and the file's handle-move convention; previously
// `m_Cmd = other.m_Cmd;` left both contexts aliasing the same command
// buffer, risking double use/reset through the moved-from object.
systems::TransferContext &
systems::TransferContext::operator=(TransferContext &&other) noexcept
{
    if (this == &other)
        return *this;
    m_Cmd = Take(other.m_Cmd);
    m_Device = Take(other.m_Device);
    m_OwnedBuffers = std::move(other.m_OwnedBuffers);
    m_OwnedImages = std::move(other.m_OwnedImages);
    return *this;
}
#pragma endregion

View File

@ -9,8 +9,9 @@
using namespace systems::_internal;
SyncServer::Entry
SyncServer::Entry::Create(Device &device)
SyncServer::Entry::Entry(Device &device)
: m_CurrentPoint{0, 1}
, m_AttachedPool{nullptr}
{
constexpr static vk::SemaphoreTypeCreateInfo TYPE_CREATE_INFO = {
.semaphoreType = vk::SemaphoreType::eTimeline,
@ -18,13 +19,7 @@ SyncServer::Entry::Create(Device &device)
};
constexpr static vk::SemaphoreCreateInfo SEMAPHORE_CREATE_INFO = {.pNext = &TYPE_CREATE_INFO};
vk::Semaphore semaphore;
AbortIfFailed(device.m_Device->createSemaphore(&SEMAPHORE_CREATE_INFO, nullptr, &semaphore));
return {
.m_Semaphore = semaphore,
.m_CurrentPoint = {.m_WaitValue = 0, .m_NextValue = 1},
};
AbortIfFailed(device.m_Device->createSemaphore(&SEMAPHORE_CREATE_INFO, nullptr, &m_Semaphore));
}
void
@ -51,6 +46,11 @@ SyncServer::Entry::Wait(Device &device)
// Thus, this is safe.
m_CurrentPoint.m_WaitValue = m_CurrentPoint.m_NextValue;
m_CurrentPoint.m_WaitValue = m_CurrentPoint.m_NextValue + 1;
if (m_AttachedPool)
{
m_AttachedPool->Reset();
m_AttachedPool = nullptr;
}
}
void
@ -60,6 +60,13 @@ SyncServer::Entry::Next()
++m_CurrentPoint.m_NextValue;
}
// Defers a context pool's reset to this sync entry: the pool will be Reset()
// once the entry's timeline point has been waited on (see Entry::Wait).
// At most one pool may be attached per entry at a time (debug-asserted).
void
SyncServer::Entry::AttachPool(ContextPool *pool)
{
assert(!m_AttachedPool);
m_AttachedPool = pool;
}
systems::Receipt
SyncServer::Allocate()
{
@ -91,8 +98,7 @@ SyncServer::AllocateEntry()
return alloc;
}
m_Allocations.push_back(Entry::Create(*m_Device));
return m_Allocations.back();
return m_Allocations.emplace_back(*m_Device);
}
void

View File

@ -246,7 +246,7 @@ main(int, char **)
.pImageMemoryBarriers = &imageReadyToRead,
};
auto &context = device.CreateTransferContext();
auto context = device.CreateTransferContext();
context.Begin();
context.Dependency(imageReadyToWriteDependency);