// =============================================
//  Aster: context_pool.h
//  Copyright (c) 2020-2025 Anish Bhobe
// =============================================
|
#pragma once

#include "EASTL/span.h"
#include "context.h"

#include <aster/aster.h>

#include <aster/core/buffer.h>
#include <aster/core/image.h>
#include <aster/core/image_view.h>
#include <aster/core/physical_device.h>
#include <aster/core/pipeline.h>

#include <EASTL/intrusive_list.h>
#include <EASTL/optional.h>
#include <EASTL/vector.h>

#include <foonathan/memory/memory_pool.hpp>
#include <foonathan/memory/namespace_alias.hpp>

#include <cassert>  // assert in ContextPool::GetDevice
#include <concepts> // std::derived_from
#include <new>      // placement new in OrderlessContextPool
#include <ranges>   // std::ranges constraints on TransferContext::UploadBuffer
|
namespace systems
|
|
{
|
|
|
|
class RenderingDevice;
|
|
struct Frame;
|
|
|
|
namespace _internal
|
|
{
|
|
class ComputeContextPool;
|
|
class GraphicsContextPool;
|
|
class TransferContextPool;
|
|
class ContextPool;
|
|
} // namespace _internal
|
|
|
|
#define DEPRECATE_RAW_CALLS
|
|
|
|
/// Thin wrapper around a vk::CommandBuffer handed out by a _internal::ContextPool
/// (see ContextPool::CreateContext). Base class of the Transfer/Compute/Graphics
/// contexts declared below; provides recording begin/end, raw barriers, and
/// debug-region markers.
class Context
{
protected:
    // Non-owning back-pointer to the pool that created this context; the pool
    // also tracks resources pinned via KeepAlive().
    _internal::ContextPool *m_Pool;
    // The command buffer commands are recorded into.
    vk::CommandBuffer m_Cmd;

    friend RenderingDevice;
    friend _internal::ContextPool;

    // Protected: only pools (and the device) may construct contexts.
    explicit Context(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
        : m_Pool{&pool}
        , m_Cmd{cmd}
    {
    }

    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(Ref<Buffer> const &buffer);
    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(Ref<Image> const &image);
    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(Ref<ImageView> const &view);

public:
    /// Raw pipeline-barrier call; tagged so direct vk usage can be phased out.
    DEPRECATE_RAW_CALLS void Dependency(vk::DependencyInfo const &dependencyInfo);

    /// Begin recording into the underlying command buffer.
    void Begin();
    /// End recording.
    void End();

    /// Open a named debug region (inlined to a no-op when ASTER_NDEBUG is
    /// defined; see the #if block below).
    void BeginDebugRegion(cstr name, vec4 color = {});
    /// Close the most recently opened debug region.
    void EndDebugRegion();
};
|
|
|
|
// Inline the no-op if not debug.
// Parameters are intentionally unused in release builds; [[maybe_unused]]
// keeps -Wunused-parameter quiet without changing the signature.
#if defined(ASTER_NDEBUG)
inline void
Context::BeginDebugRegion([[maybe_unused]] cstr name, [[maybe_unused]] vec4 color)
{
}

inline void
Context::EndDebugRegion()
{
}
#endif
|
|
|
|
class TransferContext : public Context
|
|
{
|
|
protected:
|
|
friend RenderingDevice;
|
|
friend _internal::TransferContextPool;
|
|
|
|
explicit TransferContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
|
|
: Context{pool, cmd}
|
|
{
|
|
}
|
|
|
|
void UploadBuffer(Ref<Buffer> const &buffer, usize size, void const *data);
|
|
|
|
public:
|
|
void UploadTexture(Ref<Image> const &image, eastl::span<u8> const &data);
|
|
|
|
void
|
|
UploadBuffer(Ref<Buffer> const &buffer, std::ranges::range auto const &data)
|
|
{
|
|
auto const span = eastl::span{data.begin(), data.end()};
|
|
UploadBuffer(buffer, span.size_bytes(), span.data());
|
|
}
|
|
|
|
DEPRECATE_RAW_CALLS void Blit(vk::BlitImageInfo2 const &mipBlitInfo);
|
|
|
|
TransferContext(TransferContext &&other) noexcept;
|
|
TransferContext &operator=(TransferContext &&other) noexcept;
|
|
|
|
~TransferContext() = default;
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(TransferContext);
|
|
};
|
|
|
|
class ComputeContext : public TransferContext
|
|
{
|
|
protected:
|
|
friend RenderingDevice;
|
|
friend _internal::ComputeContextPool;
|
|
|
|
Pipeline const *m_PipelineInUse;
|
|
|
|
explicit ComputeContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
|
|
: TransferContext{pool, cmd}
|
|
, m_PipelineInUse{nullptr}
|
|
{
|
|
}
|
|
|
|
void PushConstantBlock(usize offset, usize size, void const *data);
|
|
|
|
void Dispatch(Pipeline const &pipeline, u32 x, u32 y, u32 z, usize size, void *data);
|
|
|
|
public:
|
|
void BindPipeline(Pipeline const &pipeline);
|
|
|
|
void
|
|
PushConstantBlock(auto const &block)
|
|
{
|
|
if constexpr (sizeof block > 128)
|
|
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof block);
|
|
PushConstantBlock(0, sizeof block, &block);
|
|
}
|
|
|
|
void
|
|
PushConstantBlock(usize const offset, auto const &block)
|
|
{
|
|
if (offset + sizeof block > 128)
|
|
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}, at offset {}", sizeof block,
|
|
offset);
|
|
PushConstantBlock(offset, sizeof block, &block);
|
|
}
|
|
|
|
void
|
|
Dispatch(Pipeline const &pipeline, u32 const x, u32 const y, u32 const z, auto &pushConstantBlock)
|
|
{
|
|
if constexpr (sizeof pushConstantBlock > 128)
|
|
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof pushConstantBlock);
|
|
Dispatch(pipeline, x, y, z, sizeof pushConstantBlock, &pushConstantBlock);
|
|
}
|
|
};
|
|
|
|
/// Context for graphics-queue work; adds draw calls, vertex/index binding, and
/// dynamic rendering on top of the compute operations.
class GraphicsContext : public ComputeContext
{
protected:
    friend RenderingDevice;
    friend _internal::GraphicsContextPool;

    explicit GraphicsContext(_internal::ContextPool &pool, vk::CommandBuffer const cmd)
        : ComputeContext{pool, cmd}
    {
    }

public:
    /// Raw viewport set; tagged so direct vk usage can be phased out.
    DEPRECATE_RAW_CALLS void SetViewport(vk::Viewport const &viewport);
    void BindVertexBuffer(Ref<VertexBuffer> const &vertexBuffer);
    void BindIndexBuffer(Ref<IndexBuffer> const &indexBuffer);
    void Draw(usize vertexCount);
    void DrawIndexed(usize indexCount);
    void DrawIndexed(usize indexCount, usize firstIndex, usize firstVertex);

    /// Raw dynamic-rendering begin; tagged so direct vk usage can be phased out.
    DEPRECATE_RAW_CALLS void BeginRendering(vk::RenderingInfo const &renderingInfo);
    void EndRendering();

    /// Escape hatch to the underlying command buffer, for code not yet ported
    /// to the wrapped calls.
    DEPRECATE_RAW_CALLS vk::CommandBuffer
    GetCommandBuffer() const
    {
        return m_Cmd;
    }
};
|
|
|
|
namespace _internal
|
|
{
|
|
|
|
/// Owns a vk::CommandPool plus the command buffers allocated from it, and pins
/// resources passed to KeepAlive() so they outlive the recorded commands.
class ContextPool
{
protected:
    // Non-owning; asserted non-null in GetDevice().
    RenderingDevice *m_Device;
    vk::CommandPool m_Pool;
    // Command buffers allocated from m_Pool so far.
    eastl::vector<vk::CommandBuffer> m_CommandBuffers;
    // Number of buffers handed out; presumably reset by Reset() so buffers can
    // be reused — TODO confirm against context_pool.cpp.
    u32 m_BuffersAllocated;

public:
    // Spare slot for owner-defined bookkeeping; semantics are up to the owner.
    u16 m_ExtraData;

    /// Who is responsible for resetting/destroying this pool.
    enum class ManagedBy : u8
    {
        eFrame,
        eDevice,
    } m_ManagedBy;

protected:
    // Resources pinned via KeepAlive(); held until the pool is reset/destroyed.
    eastl::vector<Ref<Buffer>> m_OwnedBuffers;
    eastl::vector<Ref<Image>> m_OwnedImages;
    eastl::vector<Ref<ImageView>> m_OwnedImageViews;

    vk::CommandBuffer AllocateCommandBuffer();

public:
    [[nodiscard]] RenderingDevice &
    GetDevice() const
    {
        assert(m_Device);
        return *m_Device;
    }

    /// Hook invoked around Reset so an owner (e.g. OrderlessContextPool) can
    /// reclaim this pool — NOTE(review): invocation site is out-of-line; verify
    /// in context_pool.cpp.
    eastl::function<void(ContextPool &)> m_ResetCallback;

    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(Ref<Buffer> const &buffer);
    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(Ref<Image> const &image);
    /// Keep the resource alive while the command buffers are acting.
    void KeepAlive(Ref<ImageView> const &view);

    /// Create a context recording into a command buffer from this pool.
    Context CreateContext();

    void Reset();

    ContextPool() = default;
    ContextPool(RenderingDevice &device, u32 queueFamilyIndex, ManagedBy managedBy);

    ContextPool(ContextPool &&other) noexcept;
    ContextPool &operator=(ContextPool &&other) noexcept;

    /// Identity comparison: two pools are equal iff they wrap the same
    /// vk::CommandPool handle.
    bool
    operator==(ContextPool const &other) const
    {
        return m_Pool == other.m_Pool;
    }

    ~ContextPool();

    DISALLOW_COPY_AND_ASSIGN(ContextPool);
};
|
|
|
|
/// ContextPool specialization that vends TransferContexts.
class TransferContextPool : public ContextPool
{
public:
    TransferContext CreateTransferContext();

    TransferContextPool() = default;

    TransferContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
        : ContextPool{device, queueFamilyIndex, managedBy}
    {
    }

    TransferContextPool(TransferContextPool &&other) noexcept = default;
    TransferContextPool &operator=(TransferContextPool &&other) noexcept = default;

    ~TransferContextPool() = default;

    DISALLOW_COPY_AND_ASSIGN(TransferContextPool);
};
|
|
|
|
/// TransferContextPool specialization that additionally vends ComputeContexts.
class ComputeContextPool : public TransferContextPool
{
public:
    ComputeContext CreateComputeContext();

    ComputeContextPool() = default;

    ComputeContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
        : TransferContextPool{device, queueFamilyIndex, managedBy}
    {
    }

    ComputeContextPool(ComputeContextPool &&other) noexcept = default;
    ComputeContextPool &operator=(ComputeContextPool &&other) noexcept = default;

    ~ComputeContextPool() = default;

    DISALLOW_COPY_AND_ASSIGN(ComputeContextPool);
};
|
|
|
|
/// ComputeContextPool specialization that additionally vends GraphicsContexts.
class GraphicsContextPool : public ComputeContextPool
{
public:
    GraphicsContext CreateGraphicsContext();

    GraphicsContextPool() = default;

    GraphicsContextPool(RenderingDevice &device, u32 const queueFamilyIndex, ManagedBy const managedBy)
        : ComputeContextPool{device, queueFamilyIndex, managedBy}
    {
    }

    GraphicsContextPool(GraphicsContextPool &&other) noexcept = default;
    GraphicsContextPool &operator=(GraphicsContextPool &&other) noexcept = default;

    ~GraphicsContextPool() = default;

    DISALLOW_COPY_AND_ASSIGN(GraphicsContextPool);
};
|
|
|
|
template <std::derived_from<ContextPool> TContextPool>
|
|
class OrderlessContextPool
|
|
{
|
|
using ContextPoolType = TContextPool;
|
|
|
|
struct ContextListEntry : eastl::intrusive_list_node
|
|
{
|
|
ContextPoolType m_Pool;
|
|
|
|
bool
|
|
Contains(ContextPool const &other) const
|
|
{
|
|
return m_Pool == other;
|
|
}
|
|
};
|
|
|
|
using ContextListType = eastl::intrusive_list<ContextListEntry>;
|
|
|
|
RenderingDevice *m_Device;
|
|
memory::memory_pool<> m_ContextPoolEntryMemory;
|
|
ContextListType m_FreeContextPools;
|
|
ContextListType m_UsedContextPools;
|
|
u32 m_QueueFamilyIndex;
|
|
|
|
constexpr static usize ENTRY_SIZE = sizeof(ContextListEntry);
|
|
constexpr static usize ENTRIES_PER_BLOCK = 5;
|
|
constexpr static usize BLOCK_SIZE = ENTRIES_PER_BLOCK * ENTRY_SIZE;
|
|
|
|
public:
|
|
OrderlessContextPool()
|
|
: m_Device{nullptr}
|
|
, m_ContextPoolEntryMemory{ENTRY_SIZE, BLOCK_SIZE}
|
|
, m_QueueFamilyIndex{0}
|
|
{
|
|
}
|
|
|
|
void
|
|
Init(RenderingDevice &device, u32 const queueFamilyIndex)
|
|
{
|
|
m_Device = &device;
|
|
m_QueueFamilyIndex = queueFamilyIndex;
|
|
}
|
|
|
|
TransferContext
|
|
CreateTransferContext()
|
|
requires std::derived_from<TContextPool, TransferContextPool>
|
|
{
|
|
if (!m_FreeContextPools.empty())
|
|
{
|
|
ContextListEntry &entry = m_FreeContextPools.back();
|
|
m_FreeContextPools.pop_back();
|
|
m_UsedContextPools.push_back(entry);
|
|
|
|
return entry.m_Pool.CreateTransferContext();
|
|
}
|
|
|
|
ContextListEntry &entry = *static_cast<ContextListEntry *>(m_ContextPoolEntryMemory.allocate_node());
|
|
auto pool = ContextPoolType{*m_Device, m_QueueFamilyIndex, ContextPool::ManagedBy::eDevice};
|
|
pool.m_ResetCallback = [this](ContextPool &resetPool) { this->ReleasePool(resetPool); };
|
|
new (&entry) ContextListEntry{
|
|
.m_Pool = eastl::move(pool),
|
|
};
|
|
m_UsedContextPools.push_back(entry);
|
|
|
|
return entry.m_Pool.CreateTransferContext();
|
|
}
|
|
|
|
ComputeContext
|
|
CreateComputeContext()
|
|
requires std::derived_from<TContextPool, ComputeContextPool>
|
|
{
|
|
if (!m_FreeContextPools.empty())
|
|
{
|
|
ContextListEntry &entry = m_FreeContextPools.back();
|
|
m_FreeContextPools.pop_back();
|
|
m_UsedContextPools.push_back(entry);
|
|
|
|
return entry.m_Pool.CreateComputeContext();
|
|
}
|
|
|
|
ContextListEntry &entry = *static_cast<ContextListEntry *>(m_ContextPoolEntryMemory.allocate_node());
|
|
auto pool = ContextPoolType{*m_Device, m_QueueFamilyIndex, ContextPool::ManagedBy::eDevice};
|
|
pool.m_ResetCallback = [this](ContextPool &resetPool) { this->ReleasePool(resetPool); };
|
|
new (&entry) ContextListEntry{
|
|
.m_Pool = eastl::move(pool),
|
|
};
|
|
m_UsedContextPools.push_back(entry);
|
|
|
|
return entry.m_Pool.CreateComputeContext();
|
|
}
|
|
|
|
void
|
|
ReleasePool(ContextPool &pool)
|
|
{
|
|
auto const found = eastl::find_if(m_UsedContextPools.begin(), m_UsedContextPools.end(),
|
|
[&pool](ContextListEntry const &v) { return v.Contains(pool); });
|
|
auto &v = *found;
|
|
ContextListType::remove(v);
|
|
|
|
pool.Reset();
|
|
|
|
m_FreeContextPools.push_back(v);
|
|
}
|
|
|
|
OrderlessContextPool(OrderlessContextPool &&other) noexcept
|
|
: m_Device{other.m_Device}
|
|
, m_ContextPoolEntryMemory{std::move(other.m_ContextPoolEntryMemory)}
|
|
, m_FreeContextPools{other.m_FreeContextPools}
|
|
, m_UsedContextPools{other.m_UsedContextPools}
|
|
, m_QueueFamilyIndex{other.m_QueueFamilyIndex}
|
|
{
|
|
other.m_FreeContextPools.clear();
|
|
other.m_UsedContextPools.clear();
|
|
}
|
|
|
|
OrderlessContextPool &
|
|
operator=(OrderlessContextPool &&other) noexcept
|
|
{
|
|
if (this == &other)
|
|
return *this;
|
|
m_Device = other.m_Device;
|
|
m_ContextPoolEntryMemory = std::move(other.m_ContextPoolEntryMemory);
|
|
m_FreeContextPools = other.m_FreeContextPools;
|
|
other.m_FreeContextPools.clear();
|
|
m_UsedContextPools = other.m_UsedContextPools;
|
|
other.m_UsedContextPools.clear();
|
|
m_QueueFamilyIndex = other.m_QueueFamilyIndex;
|
|
return *this;
|
|
}
|
|
|
|
~OrderlessContextPool()
|
|
{
|
|
for (auto &entry : m_FreeContextPools)
|
|
{
|
|
entry.m_Pool.~ContextPoolType();
|
|
}
|
|
for (auto &entry : m_UsedContextPools)
|
|
{
|
|
entry.m_Pool.~ContextPoolType();
|
|
}
|
|
// The allocations will 'wink' away.
|
|
}
|
|
|
|
DISALLOW_COPY_AND_ASSIGN(OrderlessContextPool);
|
|
};
|
|
|
|
// Orderless pool variants for the transfer and compute queue families.
using OrderlessTransferContextPool = OrderlessContextPool<TransferContextPool>;
using OrderlessComputeContextPool = OrderlessContextPool<ComputeContextPool>;
|
|
|
|
} // namespace _internal
|
|
} // namespace systems
|