ContextPools for Frames.

This commit is contained in:
Anish Bhobe 2025-05-17 15:25:33 +02:00
parent 8e2c77bcf1
commit 3b4ea52611
8 changed files with 735 additions and 466 deletions
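For orientation before the per-file changes: a minimal, hedged sketch of how the new per-frame pools are used, based only on the interfaces added below (Frame::CreateGraphicsContext, Context::Begin/End, the GraphicsContext bind/draw calls, and Device::Present). Device construction, swapchain image acquisition, and Frame::Reset are assumed to happen elsewhere; RenderTriangle and its parameters are illustrative names, not part of the commit.

#include "aster/systems/context.h" // assumed include paths, mirroring the new context.cpp
#include "systems/device.h"

// Hedged usage sketch (not part of this commit): record and submit one frame.
// `frame` is assumed to have been waited on and reset for the current swapchain image.
void RenderTriangle(systems::Device &device, systems::Frame &frame, const Pipeline &pipeline,
                    const Ref<VertexBuffer> &vertexBuffer, const vk::RenderingInfo &renderingInfo)
{
    // Contexts now come from the frame's GraphicsContextPool instead of a frame-owned vk::CommandPool.
    systems::GraphicsContext ctx = frame.CreateGraphicsContext();

    ctx.Begin();
    ctx.BeginRendering(renderingInfo); // also sets the scissor to renderingInfo.renderArea
    ctx.BindPipeline(pipeline);        // binds the bindless descriptor set when CommitManager is initialized
    ctx.BindVertexBuffer(vertexBuffer);
    ctx.Draw(3);                       // one triangle
    ctx.EndRendering();
    ctx.End();

    device.Present(frame, ctx);        // submits on the primary queue, then presents
}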

View File

@@ -13,6 +13,7 @@
#include <glm/glm.hpp>
#include <fmt/format.h>
#include <EASTL/shared_ptr.h>
// Macros that can collide with functions.
#if defined(max)
@@ -252,7 +253,7 @@ struct fmt::formatter<eastl::fixed_string<TType, TCount, TOverflow>> : nested_fo
};
template <typename T>
using Ref = std::shared_ptr<T>;
using Ref = eastl::shared_ptr<T>;
template <typename T>
using WeakRef = std::weak_ptr<T>;
using WeakRef = eastl::weak_ptr<T>;

View File

@@ -6,4 +6,5 @@ target_sources(aster_core
INTERFACE
"device.h"
"resource.h"
"context.h"
"commit_manager.h")

View File

@@ -0,0 +1,244 @@
// =============================================
// Aster: context_pool.h
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#pragma once
#include <aster/aster.h>
#include <aster/core/buffer.h>
#include <aster/core/image.h>
#include <aster/core/image_view.h>
#include <aster/core/pipeline.h>
#include <EASTL/list.h>
#include <EASTL/vector.h>
namespace systems
{
class Device;
struct Frame;
namespace _internal
{
class GraphicsContextPool;
class TransferContextPool;
class ContextPool;
} // namespace _internal
#define DEPRECATE_RAW_CALLS
class Context
{
protected:
_internal::ContextPool *m_Pool;
vk::CommandBuffer m_Cmd;
friend Device;
friend _internal::ContextPool;
explicit Context(_internal::ContextPool &pool, const vk::CommandBuffer cmd)
: m_Pool{&pool}
, m_Cmd{cmd}
{
}
/// Keep the resource alive while the command buffers are executing.
void KeepAlive(const Ref<Buffer> &buffer);
/// Keep the resource alive while the command buffers are executing.
void KeepAlive(const Ref<Image> &image);
/// Keep the resource alive while the command buffers are executing.
void KeepAlive(const Ref<ImageView> &view);
public:
DEPRECATE_RAW_CALLS void Dependency(const vk::DependencyInfo &dependencyInfo);
void Begin();
void End();
};
class TransferContext : public Context
{
protected:
friend Device;
friend _internal::TransferContextPool;
explicit TransferContext(_internal::ContextPool &pool, const vk::CommandBuffer cmd)
: Context{pool, cmd}
{
}
public:
struct ImageData
{
void *m_Data;
usize m_NumBytes;
};
void UploadTexture(const Ref<Image> &image, const ImageData &data);
TransferContext(TransferContext &&other) noexcept;
TransferContext &operator=(TransferContext &&other) noexcept;
~TransferContext() = default;
DISALLOW_COPY_AND_ASSIGN(TransferContext);
};
class GraphicsContext : public Context
{
protected:
friend Device;
friend _internal::GraphicsContextPool;
const Pipeline *m_PipelineInUse;
explicit GraphicsContext(_internal::ContextPool &pool, const vk::CommandBuffer cmd)
: Context{pool, cmd}
, m_PipelineInUse{nullptr}
{
}
public:
DEPRECATE_RAW_CALLS void SetViewport(const vk::Viewport &viewport);
void BindVertexBuffer(const Ref<VertexBuffer> &vertexBuffer);
void BindPipeline(const Pipeline &pipeline);
void
PushConstantBlock(auto &block)
{
if constexpr (sizeof(block) > 128)
{
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof block);
}
m_Cmd.pushConstants(m_PipelineInUse->m_Layout, vk::ShaderStageFlagBits::eAll, 0, sizeof block, &block);
}
void Draw(usize vertexCount);
void DrawIndexed(usize indexCount);
DEPRECATE_RAW_CALLS void BeginRendering(const vk::RenderingInfo &renderingInfo);
void EndRendering();
};
namespace _internal
{
class ContextPool
{
protected:
Device *m_Device;
vk::CommandPool m_Pool;
eastl::vector<vk::CommandBuffer> m_CommandBuffers;
u32 m_BuffersAllocated;
eastl::vector<Ref<Buffer>> m_OwnedBuffers;
eastl::vector<Ref<Image>> m_OwnedImages;
eastl::vector<Ref<ImageView>> m_OwnedImageViews;
vk::CommandBuffer AllocateCommandBuffer();
void Destroy();
public:
[[nodiscard]] Device &
GetDevice() const
{
assert(m_Device);
return *m_Device;
}
/// Keep the resource alive while the command buffers are executing.
void KeepAlive(const Ref<Buffer> &buffer);
/// Keep the resource alive while the command buffers are executing.
void KeepAlive(const Ref<Image> &image);
/// Keep the resource alive while the command buffers are executing.
void KeepAlive(const Ref<ImageView> &view);
Context CreateContext();
void Reset();
ContextPool() = default;
ContextPool(Device &device, u32 queueFamilyIndex);
ContextPool(ContextPool &&other) noexcept;
ContextPool &operator=(ContextPool &&other) noexcept;
~ContextPool();
DISALLOW_COPY_AND_ASSIGN(ContextPool);
};
class TransferContextPool : public ContextPool
{
public:
TransferContext CreateTransferContext();
TransferContextPool() = default;
TransferContextPool(Device &device, const u32 queueFamilyIndex)
: ContextPool{device, queueFamilyIndex}
{
}
TransferContextPool(TransferContextPool &&other) noexcept
: ContextPool{std::move(other)}
{
}
TransferContextPool &
operator=(TransferContextPool &&other) noexcept
{
if (this == &other)
return *this;
ContextPool::operator=(std::move(other));
return *this;
}
~TransferContextPool()
{
Destroy();
}
DISALLOW_COPY_AND_ASSIGN(TransferContextPool);
};
//
// class ComputeContextPool : public TransferContextPool
//{
// ComputeCon CreateTransferContext();
//};
class GraphicsContextPool : public TransferContextPool
{
public:
GraphicsContext CreateGraphicsContext();
GraphicsContextPool() = default;
GraphicsContextPool(Device &device, const u32 queueFamilyIndex)
: TransferContextPool{device, queueFamilyIndex}
{
}
GraphicsContextPool(GraphicsContextPool &&other) noexcept
: TransferContextPool{std::move(other)}
{
}
GraphicsContextPool &
operator=(GraphicsContextPool &&other) noexcept
{
if (this == &other)
return *this;
TransferContextPool::operator=(std::move(other));
return *this;
}
~GraphicsContextPool()
{
Destroy();
}
DISALLOW_COPY_AND_ASSIGN(GraphicsContextPool);
};
} // namespace _internal
} // namespace systems
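The pool is engine-internal (note the _internal namespace), but its lifecycle is easiest to see in isolation. A hedged sketch, assuming a live device and queue family 0; submission and the GPU wait between frames are elided.

// Hedged sketch (not part of this commit): command-buffer recycling in a ContextPool across frames.
void PoolLifecycleSketch(systems::Device &device)
{
    systems::_internal::GraphicsContextPool pool{device, /*queueFamilyIndex=*/0};

    for (u32 i = 0; i < 3; ++i)
    {
        // The first iteration allocates a fresh vk::CommandBuffer; later iterations
        // reuse it, because Reset() rewinds the allocation counter instead of freeing.
        systems::GraphicsContext ctx = pool.CreateGraphicsContext();
        ctx.Begin();
        // ... record work; anything passed to KeepAlive() stays referenced until Reset() ...
        ctx.End();

        // (submit and wait for the GPU here)

        pool.Reset(); // resets the vk::CommandPool and releases the kept-alive resources
    }
}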

View File

@@ -5,6 +5,7 @@
#pragma once
#include "context.h"
#include "pipeline_helpers.h"
#include "resource.h"
@@ -23,6 +24,7 @@
#include "EASTL/deque.h"
#include <EASTL/hash_map.h>
#include <EASTL/optional.h>
#include <EASTL/variant.h>
#include <slang-com-ptr.h>
#include <slang.h>
@@ -343,127 +345,34 @@ class Receipt
friend _internal::SyncServer;
};
class Device;
struct Frame;
#define DEPRECATE_RAW_CALLS
class Context
{
protected:
vk::CommandBuffer m_Cmd;
friend Device;
friend Frame;
explicit Context(const vk::CommandBuffer cmd)
: m_Cmd{cmd}
{
}
public:
DEPRECATE_RAW_CALLS void Dependency(const vk::DependencyInfo &dependencyInfo);
void Begin();
void End();
};
class GraphicsContext : public Context
{
protected:
friend Device;
friend Frame;
const Pipeline *m_PipelineInUse;
explicit GraphicsContext(const vk::CommandBuffer cmd)
: Context{cmd}
, m_PipelineInUse{nullptr}
{
}
public:
DEPRECATE_RAW_CALLS void SetViewport(const vk::Viewport &viewport);
void BindVertexBuffer(const Ref<VertexBuffer> &vertexBuffer);
void BindPipeline(const Pipeline &pipeline);
void
PushConstantBlock(auto &block)
{
if constexpr (sizeof(block) > 128)
{
WARN("Vulkan only guarantees 128 bytes of Push Constants. Size of PCB is {}", sizeof block);
}
m_Cmd.pushConstants(m_PipelineInUse->m_Layout, vk::ShaderStageFlagBits::eAll, 0, sizeof block, &block);
}
void Draw(usize vertexCount);
void DrawIndexed(usize indexCount);
DEPRECATE_RAW_CALLS void BeginRendering(const vk::RenderingInfo &renderingInfo);
void EndRendering();
};
class TransferContext : public Context
{
protected:
friend Device;
friend Frame;
Device *m_Device;
explicit TransferContext(Device &device, const vk::CommandBuffer cmd)
: Context{cmd}
, m_Device{&device}
{
}
eastl::vector<Ref<Buffer>> m_OwnedBuffers;
eastl::vector<Ref<Image>> m_OwnedImages;
void Reset();
public:
struct ImageData
{
void *m_Data;
usize m_NumBytes;
};
void UploadTexture(const Ref<Image> &image, const ImageData &data);
TransferContext(TransferContext &&other) noexcept;
TransferContext &operator=(TransferContext &&other) noexcept;
DISALLOW_COPY_AND_ASSIGN(TransferContext);
~TransferContext() = default;
};
struct Frame
{
// Persistent
Device *m_Device;
// TODO: ThreadSafe
vk::CommandPool m_Pool;
_internal::GraphicsContextPool m_PrimaryPool;
_internal::TransferContextPool m_AsyncTransferPool;
_internal::ContextPool m_AsyncComputePool;
vk::Fence m_FrameAvailableFence;
vk::Semaphore m_ImageAcquireSem;
vk::Semaphore m_RenderFinishSem;
u32 m_FrameIdx;
eastl::vector<vk::CommandBuffer> m_CommandBuffers;
// Transient
vk::Image m_SwapchainImage;
vk::ImageView m_SwapchainImageView;
Size2D m_SwapchainSize;
u32 m_ImageIdx;
u32 m_CommandBuffersAllocated;
void Reset(u32 imageIdx, vk::Image swapchainImage, vk::ImageView swapchainImageView, Size2D swapchainSize);
GraphicsContext CreateGraphicsContext();
TransferContext CreateTransferContext();
TransferContext CreateAsyncTransferContext();
void WaitUntilReady();
Frame() = default;
Frame(Device &device, u32 primaryQueueFamily, u32 frameIndex);
Frame(Device &device, u32 frameIndex, u32 primaryQueueFamily, u32 asyncTransferQueue, u32 asyncComputeQueue);
Frame(Frame &&other) noexcept;
Frame &operator=(Frame &&other) noexcept;
@@ -486,12 +395,15 @@ class Device final
std::unique_ptr<CommitManager> m_CommitManager;
// TODO: This is single-threaded.
vk::Queue m_GraphicsQueue;
vk::Queue m_PrimaryQueue;
u32 m_PrimaryQueueFamily;
vk::Queue m_TransferQueue;
u32 m_TransferQueueFamily;
vk::Queue m_ComputeQueue;
u32 m_ComputeQueueFamily;
std::array<Frame, MAX_FRAMES_IN_FLIGHT> m_Frames;
u32 m_CurrentFrameIdx = 0;

View File

@@ -41,7 +41,7 @@ CastImage(const Ref<TFrom> &from)
{
if constexpr (not concepts::ImageInto<TFrom, TTo>)
assert(TTo::FLAGS & from->m_Flags_);
return std::reinterpret_pointer_cast<TTo>(from);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion
@@ -54,7 +54,7 @@ CastView(const Ref<View<TFrom>> &from)
{
if constexpr (not concepts::ImageInto<TFrom, typename TTo::ImageType>)
assert(TTo::ImageType::FLAGS & from->m_Image->m_Flags_);
return std::reinterpret_pointer_cast<TTo>(from);
return eastl::reinterpret_pointer_cast<TTo>(from);
}
#pragma endregion

View File

@@ -7,4 +7,5 @@ PRIVATE
"device.cpp"
"commit_manager.cpp"
"pipeline_helpers.cpp"
"context.cpp"
"sync_server.cpp")

View File

@@ -0,0 +1,399 @@
// =============================================
// Aster: context.cpp
// Copyright (c) 2020-2025 Anish Bhobe
// =============================================
#include "aster/systems/context.h"
#include "aster/systems/commit_manager.h"
#include "systems/device.h"
constexpr static u32
GetFormatSize(const vk::Format format)
{
switch (format)
{
case vk::Format::eUndefined:
return 0;
case vk::Format::eR8Unorm:
case vk::Format::eR8Snorm:
case vk::Format::eR8Uscaled:
case vk::Format::eR8Sscaled:
case vk::Format::eR8Uint:
case vk::Format::eR8Sint:
case vk::Format::eR8Srgb:
return 1;
case vk::Format::eR8G8Unorm:
case vk::Format::eR8G8Snorm:
case vk::Format::eR8G8Uscaled:
case vk::Format::eR8G8Sscaled:
case vk::Format::eR8G8Uint:
case vk::Format::eR8G8Sint:
case vk::Format::eR8G8Srgb:
return 2;
case vk::Format::eR8G8B8Unorm:
case vk::Format::eR8G8B8Snorm:
case vk::Format::eR8G8B8Uscaled:
case vk::Format::eR8G8B8Sscaled:
case vk::Format::eR8G8B8Uint:
case vk::Format::eR8G8B8Sint:
case vk::Format::eR8G8B8Srgb:
case vk::Format::eB8G8R8Unorm:
case vk::Format::eB8G8R8Snorm:
case vk::Format::eB8G8R8Uscaled:
case vk::Format::eB8G8R8Sscaled:
case vk::Format::eB8G8R8Uint:
case vk::Format::eB8G8R8Sint:
case vk::Format::eB8G8R8Srgb:
return 3;
case vk::Format::eR8G8B8A8Unorm:
case vk::Format::eR8G8B8A8Snorm:
case vk::Format::eR8G8B8A8Uscaled:
case vk::Format::eR8G8B8A8Sscaled:
case vk::Format::eR8G8B8A8Uint:
case vk::Format::eR8G8B8A8Sint:
case vk::Format::eR8G8B8A8Srgb:
case vk::Format::eB8G8R8A8Unorm:
case vk::Format::eB8G8R8A8Snorm:
case vk::Format::eB8G8R8A8Uscaled:
case vk::Format::eB8G8R8A8Sscaled:
case vk::Format::eB8G8R8A8Uint:
case vk::Format::eB8G8R8A8Sint:
case vk::Format::eB8G8R8A8Srgb:
return 4;
case vk::Format::eR16Unorm:
case vk::Format::eR16Snorm:
case vk::Format::eR16Uscaled:
case vk::Format::eR16Sscaled:
case vk::Format::eR16Uint:
case vk::Format::eR16Sint:
case vk::Format::eR16Sfloat:
return 2;
case vk::Format::eR16G16Unorm:
case vk::Format::eR16G16Snorm:
case vk::Format::eR16G16Uscaled:
case vk::Format::eR16G16Sscaled:
case vk::Format::eR16G16Uint:
case vk::Format::eR16G16Sint:
case vk::Format::eR16G16Sfloat:
return 4;
case vk::Format::eR16G16B16Unorm:
case vk::Format::eR16G16B16Snorm:
case vk::Format::eR16G16B16Uscaled:
case vk::Format::eR16G16B16Sscaled:
case vk::Format::eR16G16B16Uint:
case vk::Format::eR16G16B16Sint:
case vk::Format::eR16G16B16Sfloat:
return 6;
case vk::Format::eR16G16B16A16Unorm:
case vk::Format::eR16G16B16A16Snorm:
case vk::Format::eR16G16B16A16Uscaled:
case vk::Format::eR16G16B16A16Sscaled:
case vk::Format::eR16G16B16A16Uint:
case vk::Format::eR16G16B16A16Sint:
case vk::Format::eR16G16B16A16Sfloat:
return 8;
case vk::Format::eR32Uint:
case vk::Format::eR32Sint:
case vk::Format::eR32Sfloat:
return 4;
case vk::Format::eR32G32Uint:
case vk::Format::eR32G32Sint:
case vk::Format::eR32G32Sfloat:
return 8;
case vk::Format::eR32G32B32Uint:
case vk::Format::eR32G32B32Sint:
case vk::Format::eR32G32B32Sfloat:
return 12;
case vk::Format::eR32G32B32A32Uint:
case vk::Format::eR32G32B32A32Sint:
case vk::Format::eR32G32B32A32Sfloat:
return 16;
case vk::Format::eD16Unorm:
return 2;
case vk::Format::eD32Sfloat:
return 4;
case vk::Format::eS8Uint:
return 1;
case vk::Format::eD16UnormS8Uint:
return 6;
case vk::Format::eD24UnormS8Uint:
return 4;
case vk::Format::eD32SfloatS8Uint:
return 5;
default:
TODO("Esoteric Formats");
}
return 0;
}
void
systems::Context::KeepAlive(const Ref<Buffer> &buffer)
{
assert(m_Pool);
m_Pool->KeepAlive(buffer);
}
void
systems::Context::KeepAlive(const Ref<Image> &image)
{
assert(m_Pool);
m_Pool->KeepAlive(image);
}
void
systems::Context::KeepAlive(const Ref<ImageView> &view)
{
assert(m_Pool);
m_Pool->KeepAlive(view);
}
void
systems::Context::Dependency(const vk::DependencyInfo &dependencyInfo)
{
m_Cmd.pipelineBarrier2(&dependencyInfo);
}
void
systems::Context::Begin()
{
vk::CommandBufferBeginInfo commandBufferBeginInfo = {
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
};
auto result = m_Cmd.begin(&commandBufferBeginInfo);
ERROR_IF(Failed(result), "Could not begin context") THEN_ABORT(result);
}
void
systems::Context::End()
{
auto result = m_Cmd.end();
ERROR_IF(Failed(result), "Could not end context") THEN_ABORT(result);
}
void
systems::GraphicsContext::SetViewport(const vk::Viewport &viewport)
{
m_Cmd.setViewport(0, 1, &viewport);
}
void
systems::GraphicsContext::BindVertexBuffer(const Ref<VertexBuffer> &vertexBuffer)
{
constexpr vk::DeviceSize offset = 0;
m_Cmd.bindVertexBuffers(0, 1, &vertexBuffer->m_Buffer, &offset);
}
void
systems::GraphicsContext::BindPipeline(const Pipeline &pipeline)
{
m_Cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
// TODO: Maybe find a smarter place to host this.
if (CommitManager::IsInit())
{
m_Cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1,
&CommitManager::Instance().GetDescriptorSet(), 0, nullptr);
}
m_PipelineInUse = &pipeline;
}
void
systems::GraphicsContext::Draw(const usize vertexCount)
{
m_Cmd.draw(static_cast<u32>(vertexCount), 1, 0, 0);
}
void
systems::GraphicsContext::DrawIndexed(usize indexCount)
{
m_Cmd.drawIndexed(static_cast<u32>(indexCount), 1, 0, 0, 0);
}
void
systems::GraphicsContext::BeginRendering(const vk::RenderingInfo &renderingInfo)
{
m_Cmd.beginRendering(&renderingInfo);
m_Cmd.setScissor(0, 1, &renderingInfo.renderArea);
}
void
systems::GraphicsContext::EndRendering()
{
m_Cmd.endRendering();
}
void
systems::TransferContext::UploadTexture(const Ref<Image> &image, const ImageData &data)
{
ERROR_IF(not(image and image->IsValid()), "Invalid image");
auto [w, h, d] = image->m_Extent;
auto formatSize = GetFormatSize(image->m_Format);
auto expectedByteSize = static_cast<u64>(w) * static_cast<u64>(h) * static_cast<u64>(d) * formatSize;
ERROR_IF(expectedByteSize != data.m_NumBytes, "Mismatch in data size {} vs image size {} ({}x{}x{}x{})",
data.m_NumBytes, expectedByteSize, w, h, d, formatSize);
const Ref<StagingBuffer> stagingBuffer = m_Pool->GetDevice().CreateStagingBuffer(data.m_NumBytes);
stagingBuffer->Write(0, data.m_NumBytes, data.m_Data);
const vk::BufferImageCopy bufferImageCopy = {
.bufferOffset = 0,
.bufferRowLength = w,
.bufferImageHeight = h,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = image->m_Extent,
};
m_Cmd.copyBufferToImage(stagingBuffer->m_Buffer, image->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
&bufferImageCopy);
KeepAlive(stagingBuffer);
KeepAlive(image);
}
systems::TransferContext::TransferContext(TransferContext &&other) noexcept
: Context{std::move(other)}
{
}
systems::TransferContext &
systems::TransferContext::operator=(TransferContext &&other) noexcept
{
if (this == &other)
return *this;
Context::operator=(std::move(other));
return *this;
}
using namespace systems::_internal;
ContextPool::ContextPool(Device &device, const u32 queueFamilyIndex)
: m_Device{&device}
, m_BuffersAllocated{0}
{
const vk::CommandPoolCreateInfo commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = queueFamilyIndex,
};
AbortIfFailed(device.m_Device->createCommandPool(&commandPoolCreateInfo, nullptr, &m_Pool));
}
ContextPool::ContextPool(ContextPool &&other) noexcept
: m_Device{other.m_Device}
, m_Pool{Take(other.m_Pool)}
, m_CommandBuffers{std::move(other.m_CommandBuffers)}
, m_BuffersAllocated{other.m_BuffersAllocated}
, m_OwnedBuffers{std::move(other.m_OwnedBuffers)}
, m_OwnedImages{std::move(other.m_OwnedImages)}
, m_OwnedImageViews{std::move(other.m_OwnedImageViews)}
{
}
ContextPool &
ContextPool::operator=(ContextPool &&other) noexcept
{
if (this == &other)
return *this;
using eastl::swap;
swap(m_Device, other.m_Device);
swap(m_Pool, other.m_Pool);
swap(m_CommandBuffers, other.m_CommandBuffers);
swap(m_BuffersAllocated, other.m_BuffersAllocated);
swap(m_OwnedBuffers, other.m_OwnedBuffers);
swap(m_OwnedImages, other.m_OwnedImages);
swap(m_OwnedImageViews, other.m_OwnedImageViews);
return *this;
}
ContextPool::~ContextPool()
{
Destroy();
}
void
ContextPool::KeepAlive(const Ref<Buffer> &buffer)
{
m_OwnedBuffers.push_back(buffer);
}
void
ContextPool::KeepAlive(const Ref<Image> &image)
{
m_OwnedImages.push_back(image);
}
void
ContextPool::KeepAlive(const Ref<ImageView> &view)
{
m_OwnedImageViews.push_back(view);
}
vk::CommandBuffer
ContextPool::AllocateCommandBuffer()
{
// Buffers are available.
if (m_BuffersAllocated < m_CommandBuffers.size())
{
return m_CommandBuffers[m_BuffersAllocated++];
}
// Allocate New Buffer.
const vk::CommandBufferAllocateInfo allocateInfo = {
.commandPool = m_Pool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
vk::CommandBuffer &cmd = m_CommandBuffers.emplace_back();
AbortIfFailed(m_Device->m_Device->allocateCommandBuffers(&allocateInfo, &cmd));
return cmd;
}
void
ContextPool::Destroy()
{
if (!m_Pool)
return;
m_Device->m_Device->destroy(Take(m_Pool), nullptr);
}
systems::Context
ContextPool::CreateContext()
{
return Context{*this, AllocateCommandBuffer()};
}
void
ContextPool::Reset()
{
assert(m_Pool);
AbortIfFailed(m_Device->m_Device->resetCommandPool(m_Pool, {}));
m_BuffersAllocated = 0;
m_OwnedBuffers.clear();
m_OwnedImages.clear();
m_OwnedImageViews.clear();
}
systems::TransferContext
TransferContextPool::CreateTransferContext()
{
return TransferContext{*this, AllocateCommandBuffer()};
}
systems::GraphicsContext
GraphicsContextPool::CreateGraphicsContext()
{
return GraphicsContext{*this, AllocateCommandBuffer()};
}
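A quick sanity check of the size validation in UploadTexture above: for a 256x256 R8G8B8A8 image, GetFormatSize returns 4, so the expected payload is 256 * 256 * 1 * 4 = 262144 bytes. A hedged calling-side sketch; UploadSketch, transferCtx, image, and pixels are assumed placeholders, and the image is assumed to already be in eTransferDstOptimal layout, since the copy is recorded with that layout.

// Hedged sketch (not part of this commit): uploading a 256x256 RGBA8 texture.
void UploadSketch(systems::TransferContext &transferCtx, const Ref<Image> &image, void *pixels)
{
    const usize numBytes = 256ull * 256ull * 1ull * 4ull; // w * h * depth * bytes-per-texel = 262144

    systems::TransferContext::ImageData data{};
    data.m_Data = pixels;
    data.m_NumBytes = numBytes; // must equal the image's extent times format size, or ERROR_IF fires

    transferCtx.UploadTexture(image, data); // stages, records the copy, and keeps both resources alive
}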

View File

@@ -20,126 +20,6 @@ static constexpr QueueSupportFlags REQUIRED_QUEUE_SUPPORT =
QueueSupportFlags{} | QueueSupportFlagBits::eGraphics | QueueSupportFlagBits::eCompute |
QueueSupportFlagBits::ePresent | QueueSupportFlagBits::eTransfer;
constexpr static u32
GetFormatSize(const vk::Format format)
{
switch (format)
{
case vk::Format::eUndefined:
return 0;
case vk::Format::eR8Unorm:
case vk::Format::eR8Snorm:
case vk::Format::eR8Uscaled:
case vk::Format::eR8Sscaled:
case vk::Format::eR8Uint:
case vk::Format::eR8Sint:
case vk::Format::eR8Srgb:
return 1;
case vk::Format::eR8G8Unorm:
case vk::Format::eR8G8Snorm:
case vk::Format::eR8G8Uscaled:
case vk::Format::eR8G8Sscaled:
case vk::Format::eR8G8Uint:
case vk::Format::eR8G8Sint:
case vk::Format::eR8G8Srgb:
return 2;
case vk::Format::eR8G8B8Unorm:
case vk::Format::eR8G8B8Snorm:
case vk::Format::eR8G8B8Uscaled:
case vk::Format::eR8G8B8Sscaled:
case vk::Format::eR8G8B8Uint:
case vk::Format::eR8G8B8Sint:
case vk::Format::eR8G8B8Srgb:
case vk::Format::eB8G8R8Unorm:
case vk::Format::eB8G8R8Snorm:
case vk::Format::eB8G8R8Uscaled:
case vk::Format::eB8G8R8Sscaled:
case vk::Format::eB8G8R8Uint:
case vk::Format::eB8G8R8Sint:
case vk::Format::eB8G8R8Srgb:
return 3;
case vk::Format::eR8G8B8A8Unorm:
case vk::Format::eR8G8B8A8Snorm:
case vk::Format::eR8G8B8A8Uscaled:
case vk::Format::eR8G8B8A8Sscaled:
case vk::Format::eR8G8B8A8Uint:
case vk::Format::eR8G8B8A8Sint:
case vk::Format::eR8G8B8A8Srgb:
case vk::Format::eB8G8R8A8Unorm:
case vk::Format::eB8G8R8A8Snorm:
case vk::Format::eB8G8R8A8Uscaled:
case vk::Format::eB8G8R8A8Sscaled:
case vk::Format::eB8G8R8A8Uint:
case vk::Format::eB8G8R8A8Sint:
case vk::Format::eB8G8R8A8Srgb:
return 4;
case vk::Format::eR16Unorm:
case vk::Format::eR16Snorm:
case vk::Format::eR16Uscaled:
case vk::Format::eR16Sscaled:
case vk::Format::eR16Uint:
case vk::Format::eR16Sint:
case vk::Format::eR16Sfloat:
return 2;
case vk::Format::eR16G16Unorm:
case vk::Format::eR16G16Snorm:
case vk::Format::eR16G16Uscaled:
case vk::Format::eR16G16Sscaled:
case vk::Format::eR16G16Uint:
case vk::Format::eR16G16Sint:
case vk::Format::eR16G16Sfloat:
return 4;
case vk::Format::eR16G16B16Unorm:
case vk::Format::eR16G16B16Snorm:
case vk::Format::eR16G16B16Uscaled:
case vk::Format::eR16G16B16Sscaled:
case vk::Format::eR16G16B16Uint:
case vk::Format::eR16G16B16Sint:
case vk::Format::eR16G16B16Sfloat:
return 6;
case vk::Format::eR16G16B16A16Unorm:
case vk::Format::eR16G16B16A16Snorm:
case vk::Format::eR16G16B16A16Uscaled:
case vk::Format::eR16G16B16A16Sscaled:
case vk::Format::eR16G16B16A16Uint:
case vk::Format::eR16G16B16A16Sint:
case vk::Format::eR16G16B16A16Sfloat:
return 8;
case vk::Format::eR32Uint:
case vk::Format::eR32Sint:
case vk::Format::eR32Sfloat:
return 4;
case vk::Format::eR32G32Uint:
case vk::Format::eR32G32Sint:
case vk::Format::eR32G32Sfloat:
return 8;
case vk::Format::eR32G32B32Uint:
case vk::Format::eR32G32B32Sint:
case vk::Format::eR32G32B32Sfloat:
return 12;
case vk::Format::eR32G32B32A32Uint:
case vk::Format::eR32G32B32A32Sint:
case vk::Format::eR32G32B32A32Sfloat:
return 16;
case vk::Format::eD16Unorm:
return 2;
case vk::Format::eD32Sfloat:
return 4;
case vk::Format::eS8Uint:
return 1;
case vk::Format::eD16UnormS8Uint:
return 6;
case vk::Format::eD24UnormS8Uint:
return 4;
case vk::Format::eD32SfloatS8Uint:
return 5;
default:
TODO("Esoteric Formats");
}
return 0;
}
PhysicalDevice
systems::DefaultPhysicalDeviceSelector(const PhysicalDevices &physicalDevices)
{
@@ -182,7 +62,7 @@ systems::Device::CreateStorageBuffer(const usize size, const cstr name)
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
return std::make_shared<StorageBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
return eastl::make_shared<StorageBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
}
Ref<UniformBuffer>
@@ -193,7 +73,7 @@ systems::Device::CreateUniformBuffer(const usize size, const cstr name)
VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
return std::make_shared<UniformBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
return eastl::make_shared<UniformBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
}
Ref<StagingBuffer>
@@ -203,7 +83,7 @@ systems::Device::CreateStagingBuffer(const usize size, const cstr name)
constexpr VmaAllocationCreateFlags createFlags =
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
return std::make_shared<StagingBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
return eastl::make_shared<StagingBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
}
Ref<VertexBuffer>
@@ -213,7 +93,7 @@ systems::Device::CreateVertexBuffer(const usize size, const cstr name)
constexpr VmaAllocationCreateFlags createFlags =
VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
constexpr VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_AUTO;
return std::make_shared<VertexBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
return eastl::make_shared<VertexBuffer>(Buffer{&m_Device, size, usage, createFlags, memoryUsage, name});
}
#pragma endregion
@@ -265,8 +145,8 @@ systems::Device::CreateTexture2D(const Texture2DCreateInfo &createInfo)
m_Device.SetName(image, createInfo.m_Name);
return std::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format, flags,
layerCount, mipLevels);
return eastl::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
flags, layerCount, mipLevels);
}
Ref<ImageCube>
@@ -297,8 +177,8 @@ systems::Device::CreateTextureCube(const TextureCubeCreateInfo &createInfo)
m_Device.SetName(image, createInfo.m_Name);
return CastImage<ImageCube>(std::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent,
imageCreateInfo.format, flags, layerCount, mipLevels));
return CastImage<ImageCube>(eastl::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent,
imageCreateInfo.format, flags, layerCount, mipLevels));
}
Ref<Image>
@@ -324,8 +204,8 @@ systems::Device::CreateAttachment(const AttachmentCreateInfo &createInfo)
m_Device.SetName(image, createInfo.m_Name);
return std::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
Image::Flags{}, layerCount, mipLevels);
return eastl::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
Image::Flags{}, layerCount, mipLevels);
}
Ref<Image>
@@ -351,8 +231,8 @@ systems::Device::CreateDepthStencilImage(const DepthStencilImageCreateInfo &crea
m_Device.SetName(image, createInfo.m_Name);
return std::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
Image::Flags{}, layerCount, mipLevels);
return eastl::make_shared<Image>(&m_Device, image, allocation, imageCreateInfo.extent, imageCreateInfo.format,
Image::Flags{}, layerCount, mipLevels);
}
vk::ImageCreateInfo
@@ -470,8 +350,8 @@ systems::Device::CreateView(const ViewCreateInfo<Image> &createInfo)
m_Device.SetName(view, createInfo.m_Name);
return std::make_shared<ImageView>(createInfo.m_Image, view, createInfo.m_Image->m_Extent, createInfo.m_BaseLayer,
layerCount, createInfo.m_BaseMipLevel, mipCount);
return eastl::make_shared<ImageView>(createInfo.m_Image, view, createInfo.m_Image->m_Extent, createInfo.m_BaseLayer,
layerCount, createInfo.m_BaseMipLevel, mipCount);
}
#pragma endregion
@@ -541,7 +421,7 @@ systems::Device::CreateSampler(const SamplerCreateInfo &createInfo)
return iter->second.lock();
}
auto object = std::make_shared<Sampler>(&m_Device, vkCreateInfo, createInfo.m_Name ? createInfo.m_Name : nullptr);
auto object = eastl::make_shared<Sampler>(&m_Device, vkCreateInfo, createInfo.m_Name ? createInfo.m_Name : nullptr);
m_HashToSamplerIdx.emplace(vkCreateInfo, object);
return object;
@@ -1029,14 +909,27 @@ systems::Device::Device(const DeviceCreateInfo &createInfo)
m_TransferQueueFamily = m_PrimaryQueueFamily;
}
// TODO: Async Compute
u32 computeQueueIndex;
if (const auto asyncCompute = FindAsyncComputeQueue(physicalDevice, m_PrimaryQueueFamily))
{
const QueueAllocation allocation = asyncCompute.value();
queueAllocations.push_back(allocation);
m_ComputeQueueFamily = allocation.m_Family;
computeQueueIndex = 0;
}
else
{
computeQueueIndex = primaryQueue.m_Count;
++primaryQueue.m_Count;
m_ComputeQueueFamily = m_PrimaryQueueFamily;
}
m_Device = ::Device{m_Instance, physicalDevice, features, queueAllocations, createInfo.m_PipelineCacheData,
createInfo.m_Name};
m_GraphicsQueue = m_Device.GetQueue(m_PrimaryQueueFamily, primaryQueueIndex);
m_PrimaryQueue = m_Device.GetQueue(m_PrimaryQueueFamily, primaryQueueIndex);
m_TransferQueue = m_Device.GetQueue(m_TransferQueueFamily, transferQueueIndex);
m_ComputeQueue = m_Device.GetQueue(m_ComputeQueueFamily, computeQueueIndex);
m_Swapchain = Swapchain{m_Surface, m_Device, m_Window.get().GetSize()};
@@ -1091,7 +984,7 @@ systems::Device::Device(const DeviceCreateInfo &createInfo)
u32 index = 0;
for (auto &frame : m_Frames)
{
frame = Frame(*this, m_PrimaryQueueFamily, index++);
frame = Frame(*this, index++, m_PrimaryQueueFamily, m_TransferQueueFamily, m_ComputeQueueFamily);
}
}
@@ -1102,7 +995,6 @@ systems::Device::~Device()
m_Device->destroy(Take(frame.m_FrameAvailableFence), nullptr);
m_Device->destroy(Take(frame.m_ImageAcquireSem), nullptr);
m_Device->destroy(Take(frame.m_RenderFinishSem), nullptr);
m_Device->destroy(Take(frame.m_Pool), nullptr);
}
m_Device->destroy(Take(m_TransferPool), nullptr);
m_TransferContexts.clear();
@@ -1164,7 +1056,7 @@ systems::Device::Present(Frame &frame, GraphicsContext &graphicsContext)
.signalSemaphoreCount = 1,
.pSignalSemaphores = &frame.m_RenderFinishSem,
};
vk::Result result = m_GraphicsQueue.submit(1, &submitInfo, frame.m_FrameAvailableFence);
vk::Result result = m_PrimaryQueue.submit(1, &submitInfo, frame.m_FrameAvailableFence);
ERROR_IF(Failed(result), "Command queue submit failed. Cause: {}", result)
THEN_ABORT(result);
@@ -1176,7 +1068,7 @@ systems::Device::Present(Frame &frame, GraphicsContext &graphicsContext)
.pImageIndices = &frame.m_ImageIdx,
.pResults = nullptr,
};
switch (result = m_GraphicsQueue.presentKHR(&presentInfo))
switch (result = m_PrimaryQueue.presentKHR(&presentInfo))
{
case vk::Result::eSuccess:
break;
@@ -1193,7 +1085,7 @@ systems::Device::Present(Frame &frame, GraphicsContext &graphicsContext)
systems::TransferContext &
systems::Device::CreateTransferContext()
{
if (!m_TransferContextFreeList.empty())
/*if (!m_TransferContextFreeList.empty())
{
u32 freeIndex = m_TransferContextFreeList.back();
m_TransferContextFreeList.pop_back();
@@ -1212,6 +1104,8 @@ systems::Device::CreateTransferContext()
AbortIfFailed(m_Device->allocateCommandBuffers(&allocateInfo, &cmd));
m_TransferContexts.push_back(TransferContext{*this, cmd});
return m_TransferContexts.back();*/
TODO();
return m_TransferContexts.back();
}
@@ -1241,7 +1135,7 @@ systems::Device::Submit(Context &context)
.signalSemaphoreCount = 1,
.pSignalSemaphores = &entry.m_Semaphore,
};
vk::Result result = m_GraphicsQueue.submit(1, &submitInfo, {});
vk::Result result = m_PrimaryQueue.submit(1, &submitInfo, {});
ERROR_IF(Failed(result), "Command queue submit failed. Cause: {}", result)
THEN_ABORT(result);
@@ -1259,8 +1153,9 @@ systems::Frame::Reset(u32 imageIdx, vk::Image swapchainImage, vk::ImageView swap
{
AbortIfFailedMV(m_Device->m_Device->resetFences(1, &m_FrameAvailableFence), "Fence {} reset failed.", m_FrameIdx);
AbortIfFailedMV(m_Device->m_Device->resetCommandPool(m_Pool, {}), "Command pool {} reset failed.", m_FrameIdx);
m_CommandBuffersAllocated = 0;
m_PrimaryPool.Reset();
m_AsyncTransferPool.Reset();
m_AsyncComputePool.Reset();
m_ImageIdx = imageIdx;
m_SwapchainImage = swapchainImage;
@@ -1271,47 +1166,13 @@ systems::Frame::Reset(u32 imageIdx, vk::Image swapchainImage, vk::ImageView swap
systems::GraphicsContext
systems::Frame::CreateGraphicsContext()
{
vk::CommandBuffer cmd;
if (m_CommandBuffers.size() > m_CommandBuffersAllocated)
{
cmd = m_CommandBuffers[m_CommandBuffersAllocated++];
}
else
{
const vk::CommandBufferAllocateInfo allocateInfo{
.commandPool = m_Pool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailedMV(m_Device->m_Device->allocateCommandBuffers(&allocateInfo, &cmd),
"Command buffer {} alloc failed.", m_FrameIdx);
m_CommandBuffers.push_back(cmd);
}
return GraphicsContext{cmd};
return m_PrimaryPool.CreateGraphicsContext();
}
systems::TransferContext
systems::Frame::CreateTransferContext()
systems::Frame::CreateAsyncTransferContext()
{
vk::CommandBuffer cmd;
if (m_CommandBuffers.size() > m_CommandBuffersAllocated)
{
cmd = m_CommandBuffers[m_CommandBuffersAllocated++];
}
else
{
const vk::CommandBufferAllocateInfo allocateInfo{
.commandPool = m_Pool,
.level = vk::CommandBufferLevel::ePrimary,
.commandBufferCount = 1,
};
AbortIfFailedMV(m_Device->m_Device->allocateCommandBuffers(&allocateInfo, &cmd),
"Command buffer {} alloc failed.", m_FrameIdx);
m_CommandBuffers.push_back(cmd);
}
return TransferContext{*m_Device, cmd};
return m_AsyncTransferPool.CreateTransferContext();
}
void
@@ -1321,21 +1182,38 @@ systems::Frame::WaitUntilReady()
"Waiting for fence {} failed.", m_FrameIdx);
}
systems::Frame::Frame(Device &device, u32 primaryQueueFamily, u32 frameIndex)
systems::Frame &
systems::Frame::operator=(Frame &&other) noexcept
{
if (this == &other)
return *this;
m_Device = other.m_Device;
m_PrimaryPool = Take(other.m_PrimaryPool);
m_AsyncTransferPool = Take(other.m_AsyncTransferPool);
m_AsyncComputePool = Take(other.m_AsyncComputePool);
m_FrameAvailableFence = Take(other.m_FrameAvailableFence);
m_ImageAcquireSem = Take(other.m_ImageAcquireSem);
m_RenderFinishSem = Take(other.m_RenderFinishSem);
m_FrameIdx = other.m_FrameIdx;
m_SwapchainImage = Take(other.m_SwapchainImage);
m_SwapchainImageView = Take(other.m_SwapchainImageView);
m_SwapchainSize = Take(other.m_SwapchainSize);
m_ImageIdx = other.m_ImageIdx;
return *this;
}
systems::Frame::Frame(Device &device, u32 frameIndex, u32 const primaryQueueFamily, u32 const asyncTransferQueue,
u32 const asyncComputeQueue)
: m_Device{&device}
, m_PrimaryPool{device, primaryQueueFamily}
, m_AsyncTransferPool{device, asyncTransferQueue}
, m_AsyncComputePool{device, asyncComputeQueue}
, m_FrameIdx{frameIndex}
, m_ImageIdx{0}
{
NameString name = "Frame ";
name += static_cast<char>(frameIndex + '0');
const vk::CommandPoolCreateInfo commandPoolCreateInfo = {
.flags = vk::CommandPoolCreateFlagBits::eTransient,
.queueFamilyIndex = primaryQueueFamily,
};
AbortIfFailedMV(device.m_Device->createCommandPool(&commandPoolCreateInfo, nullptr, &m_Pool),
"Could not command pool for frame {}", frameIndex);
constexpr vk::FenceCreateInfo fenceCreateInfo = {.flags = vk::FenceCreateFlagBits::eSignaled};
AbortIfFailedMV(device.m_Device->createFence(&fenceCreateInfo, nullptr, &m_FrameAvailableFence),
"Could not create a fence for frame {}", frameIndex);
@@ -1346,7 +1224,6 @@ systems::Frame::Frame(Device &device, u32 primaryQueueFamily, u32 frameIndex)
AbortIfFailedMV(device.m_Device->createSemaphore(&semaphoreCreateInfo, nullptr, &m_RenderFinishSem),
"Could not create RF semaphore for frame {}.", frameIndex);
m_Device->SetName(m_Pool, name.c_str());
m_Device->SetName(m_FrameAvailableFence, name.c_str());
m_Device->SetName(m_ImageAcquireSem, name.c_str());
m_Device->SetName(m_RenderFinishSem, name.c_str());
@@ -1354,186 +1231,20 @@ systems::Frame::Frame(Device &device, u32 primaryQueueFamily, u32 frameIndex)
DEBUG("Frame {} created successfully.", frameIndex);
}
systems::Frame &
systems::Frame::operator=(Frame &&other) noexcept
{
if (this == &other)
return *this;
m_Device = other.m_Device;
m_Pool = Take(other.m_Pool);
m_FrameAvailableFence = Take(other.m_FrameAvailableFence);
m_ImageAcquireSem = Take(other.m_ImageAcquireSem);
m_RenderFinishSem = Take(other.m_RenderFinishSem);
m_FrameIdx = other.m_FrameIdx;
m_CommandBuffers = Take(other.m_CommandBuffers);
m_SwapchainImage = Take(other.m_SwapchainImage);
m_SwapchainImageView = Take(other.m_SwapchainImageView);
m_SwapchainSize = Take(other.m_SwapchainSize);
m_ImageIdx = other.m_ImageIdx;
m_CommandBuffersAllocated = other.m_CommandBuffersAllocated;
return *this;
}
systems::Frame::Frame(Frame &&other) noexcept
: m_Device{Take(other.m_Device)}
, m_Pool{Take(other.m_Pool)}
, m_PrimaryPool{std::move(other.m_PrimaryPool)}
, m_AsyncTransferPool{std::move(other.m_AsyncTransferPool)}
, m_AsyncComputePool{std::move(other.m_AsyncComputePool)}
, m_FrameAvailableFence{Take(other.m_FrameAvailableFence)}
, m_ImageAcquireSem{Take(other.m_ImageAcquireSem)}
, m_RenderFinishSem{Take(other.m_RenderFinishSem)}
, m_FrameIdx{other.m_FrameIdx}
, m_CommandBuffers{Take(other.m_CommandBuffers)}
, m_SwapchainImage{Take(other.m_SwapchainImage)}
, m_SwapchainImageView{Take(other.m_SwapchainImageView)}
, m_SwapchainSize{Take(other.m_SwapchainSize)}
, m_ImageIdx{other.m_ImageIdx}
, m_CommandBuffersAllocated{other.m_CommandBuffersAllocated}
{
}
#pragma endregion
// ====================================================================================================
#pragma region Context Impl
// ====================================================================================================
void
systems::Context::Dependency(const vk::DependencyInfo &dependencyInfo)
{
m_Cmd.pipelineBarrier2(&dependencyInfo);
}
void
systems::Context::Begin()
{
vk::CommandBufferBeginInfo commandBufferBeginInfo = {
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit,
};
auto result = m_Cmd.begin(&commandBufferBeginInfo);
ERROR_IF(Failed(result), "Could not begin context") THEN_ABORT(result);
}
void
systems::Context::End()
{
auto result = m_Cmd.end();
ERROR_IF(Failed(result), "Could not end context") THEN_ABORT(result);
}
void
systems::GraphicsContext::SetViewport(const vk::Viewport &viewport)
{
m_Cmd.setViewport(0, 1, &viewport);
}
void
systems::GraphicsContext::BindVertexBuffer(const Ref<VertexBuffer> &vertexBuffer)
{
constexpr vk::DeviceSize offset = 0;
m_Cmd.bindVertexBuffers(0, 1, &vertexBuffer->m_Buffer, &offset);
}
void
systems::GraphicsContext::BindPipeline(const Pipeline &pipeline)
{
m_Cmd.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline.m_Pipeline);
// TODO: Maybe find a smarter place to host this.
if (CommitManager::IsInit())
{
m_Cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline.m_Layout, 0, 1,
&CommitManager::Instance().GetDescriptorSet(), 0, nullptr);
}
m_PipelineInUse = &pipeline;
}
void
systems::GraphicsContext::Draw(const usize vertexCount)
{
m_Cmd.draw(static_cast<u32>(vertexCount), 1, 0, 0);
}
void
systems::GraphicsContext::DrawIndexed(usize indexCount)
{
m_Cmd.drawIndexed(static_cast<u32>(indexCount), 1, 0, 0, 0);
}
void
systems::GraphicsContext::BeginRendering(const vk::RenderingInfo &renderingInfo)
{
m_Cmd.beginRendering(&renderingInfo);
m_Cmd.setScissor(0, 1, &renderingInfo.renderArea);
}
void
systems::GraphicsContext::EndRendering()
{
m_Cmd.endRendering();
}
void
systems::TransferContext::Reset()
{
m_OwnedImages.clear();
m_OwnedBuffers.clear();
AbortIfFailed(m_Cmd.reset({}));
}
void
systems::TransferContext::UploadTexture(const Ref<Image> &image, const ImageData &data)
{
ERROR_IF(not(image and image->IsValid()), "Invalid image");
auto [w, h, d] = image->m_Extent;
auto formatSize = GetFormatSize(image->m_Format);
auto expectedByteSize = static_cast<u64>(w) * static_cast<u64>(h) * static_cast<u64>(d) * formatSize;
ERROR_IF(expectedByteSize != data.m_NumBytes, "Mismatch in data size {} vs image size {} ({}x{}x{}x{})",
data.m_NumBytes, expectedByteSize, w, h, d, formatSize);
const Ref<StagingBuffer> stagingBuffer = m_Device->CreateStagingBuffer(data.m_NumBytes);
stagingBuffer->Write(0, data.m_NumBytes, data.m_Data);
const vk::BufferImageCopy bufferImageCopy = {
.bufferOffset = 0,
.bufferRowLength = w,
.bufferImageHeight = h,
.imageSubresource =
{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1,
},
.imageOffset = {},
.imageExtent = image->m_Extent,
};
m_Cmd.copyBufferToImage(stagingBuffer->m_Buffer, image->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
&bufferImageCopy);
m_OwnedBuffers.push_back(stagingBuffer);
m_OwnedImages.push_back(image);
}
systems::TransferContext::TransferContext(TransferContext &&other) noexcept
: Context{other.m_Cmd}
, m_Device{Take(other.m_Device)}
, m_OwnedBuffers{std::move(other.m_OwnedBuffers)}
, m_OwnedImages{std::move(other.m_OwnedImages)}
{
}
systems::TransferContext &
systems::TransferContext::operator=(TransferContext &&other) noexcept
{
if (this == &other)
return *this;
m_Cmd = other.m_Cmd;
m_Device = Take(other.m_Device);
m_OwnedBuffers = std::move(other.m_OwnedBuffers);
m_OwnedImages = std::move(other.m_OwnedImages);
return *this;
}
#pragma endregion