GPU resource manager actually manages resources.

parent b9dcf5e4ec
commit 912e197614
@@ -11,11 +11,11 @@
 void
 Buffer::Destroy(const Device *device)
 {
-    if (!IsValid())
+    if (!IsValid() || !IsOwned())
         return;

     vmaDestroyBuffer(device->m_Allocator, m_Buffer, m_Allocation);
-    m_Size &= ~VALID_BUFFER_BIT;
+    m_Size_ = 0;
 }

 void

@@ -47,7 +47,7 @@ Buffer::Allocate(const Device *device, usize size, vk::BufferUsageFlags bufferUs
     bool hostAccessible = Cast<bool>(memoryPropertyFlags & vk::MemoryPropertyFlagBits::eHostVisible);

     m_Buffer = buffer;
-    m_Size = size | VALID_BUFFER_BIT | (hostAccessible ? HOST_ACCESSIBLE_BIT : 0);
+    m_Size_ = size | VALID_BUFFER_BIT | OWNED_BIT | (hostAccessible ? HOST_ACCESSIBLE_BIT : 0);
     m_Allocation = allocation;
     m_Mapped = Cast<u8 *>(allocationInfo.pMappedData);

@@ -19,23 +19,27 @@ struct Buffer
     [[nodiscard]] bool IsHostVisible() const;
     [[nodiscard]] bool IsValid() const;
     [[nodiscard]] bool IsMapped() const;
+    [[nodiscard]] bool IsOwned() const;

     void Destroy(const Device *device);
     void Write(const Device *device, usize offset, usize size, const void *data);

 protected:
     void Allocate(const Device *device, usize size, vk::BufferUsageFlags bufferUsage,
                   VmaAllocationCreateFlags allocationFlags, VmaMemoryUsage memoryUsage, cstr name);

-    // Buffer size is used intrusively by the Render Resource Manager
-    // If the buffer is Invalid, the remaining data in Buffer is used for other tasks.
-    usize m_Size = 0;
+    // Buffer.size is used for bookkeeping.
+    // If the buffer is Invalid, the remaining data in Buffer is used intrusively by `GpuResourceManager`.
+    usize m_Size_ = 0;

     constexpr static usize VALID_BUFFER_BIT = Cast<usize>(1llu << 63);
     constexpr static usize HOST_ACCESSIBLE_BIT = 1llu << 62;
-    constexpr static usize SIZE_MASK = ~(VALID_BUFFER_BIT | HOST_ACCESSIBLE_BIT);
+    constexpr static usize OWNED_BIT = 1llu << 61;
+    constexpr static usize SIZE_MASK = ~(VALID_BUFFER_BIT | HOST_ACCESSIBLE_BIT | OWNED_BIT);
 };

+// Ensure that m_Size_ doesn't get used intrusively, since it manages the state.
+static_assert(offsetof(Buffer, m_Size_) > sizeof(usize));
+
 struct UniformBuffer : Buffer
 {
     void Init(const Device *device, usize size, cstr name = nullptr);

@@ -66,19 +70,19 @@ struct StagingBuffer : Buffer
 inline usize
 Buffer::GetSize() const
 {
-    return m_Size & SIZE_MASK;
+    return m_Size_ & SIZE_MASK;
 }

 inline bool
 Buffer::IsHostVisible() const
 {
-    return m_Size & HOST_ACCESSIBLE_BIT;
+    return m_Size_ & HOST_ACCESSIBLE_BIT;
 }

 inline bool
 Buffer::IsValid() const
 {
-    return m_Size & VALID_BUFFER_BIT;
+    return m_Size_ & VALID_BUFFER_BIT;
 }

 inline bool

@@ -86,3 +90,9 @@ Buffer::IsMapped() const
 {
     return m_Mapped;
 }
+
+inline bool
+Buffer::IsOwned() const
+{
+    return m_Size_ & OWNED_BIT;
+}
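
The three state bits live in the top of the 64-bit size word, so a Buffer carries no separate state field and GetSize() simply masks them back out; the trade-off is that representable sizes are capped at 2^61 bytes. A minimal standalone sketch of the same packing scheme (names here are illustrative, not from the codebase):

    // Sketch: validity/ownership/host-visibility packed into the top bits of a 64-bit size.
    #include <cassert>
    #include <cstdint>

    constexpr uint64_t VALID = 1ull << 63;
    constexpr uint64_t HOST  = 1ull << 62;
    constexpr uint64_t OWNED = 1ull << 61;
    constexpr uint64_t SIZE_MASK = ~(VALID | HOST | OWNED);

    int main()
    {
        uint64_t word = 4096 | VALID | OWNED;   // a 4 KiB, valid, owned buffer
        assert((word & SIZE_MASK) == 4096);     // the size survives the packing
        word &= ~OWNED;                         // ownership handed off to the manager
        assert((word & VALID) && !(word & OWNED));
        word = 0;                               // Destroy(): clears every bit at once
        assert(!(word & VALID));
    }
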
@@ -10,12 +10,12 @@
 void
 Image::Destroy(const Device *device)
 {
-    if (!IsValid())
+    if (!IsValid() || !IsOwned())
         return;

     device->m_Device.destroy(m_View, nullptr);
     vmaDestroyImage(device->m_Allocator, m_Image, m_Allocation);
-    m_Image = nullptr;
+    m_MipLevels_ = 0;
 }

 void

@@ -23,6 +23,7 @@ Texture::Init(const Device *device, const vk::Extent2D extent, vk::Format imageF
               const cstr name)
 {
     const u32 mipLevels = isMipmapped ? 1 + Cast<u32>(floor(log2(eastl::max(extent.width, extent.height)))) : 1;
+    assert(mipLevels <= MIP_MASK);

     auto usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
     if (isMipmapped)
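
The mip count is the standard full-chain formula: one level per halving of the larger dimension, plus the base level. The new assert guards the packing above it, since the count must fit in the low 30 bits left free by VALID_BIT and OWNED_BIT. A quick sanity check of the arithmetic (illustrative only, not engine code):

    // 1 + floor(log2(max(w, h))) gives the full mip-chain length.
    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <cstdint>

    int main()
    {
        auto mipCount = [](uint32_t w, uint32_t h) {
            return 1u + static_cast<uint32_t>(std::floor(std::log2(std::max(w, h))));
        };
        assert(mipCount(1024, 512) == 11); // 1024 -> 512 -> ... -> 1 is 11 levels
        assert(mipCount(1, 1) == 1);       // a 1x1 image has only the base level
    }
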
@@ -75,7 +76,7 @@ Texture::Init(const Device *device, const vk::Extent2D extent, vk::Format imageF
     m_View = view;
     m_Allocation = allocation;
     m_Extent = {extent.width, extent.height, 1};
-    m_MipLevels = Cast<u8>(mipLevels);
+    m_MipLevels_ = mipLevels | OWNED_BIT | VALID_BIT;

     device->SetName(m_Image, name);
 }

@@ -129,6 +130,7 @@ DepthImage::Init(const Device *device, vk::Extent2D extent, cstr name)
     m_View = view;
     m_Allocation = allocation;
     m_Extent = {extent.width, extent.height, 1};
+    m_MipLevels_ = 1 | OWNED_BIT | VALID_BIT;

     device->SetName(m_Image, name);
 }
@@ -15,11 +15,19 @@ struct Image
     vk::ImageView m_View = nullptr;
     VmaAllocation m_Allocation = nullptr;
     vk::Extent3D m_Extent;
-    u8 m_MipLevels = 1;
+    // Image.m_MipLevels_ is used for bookkeeping.
+    // If the image is Invalid, the remaining data in Image is used intrusively by `GpuResourceManager`.
+    u32 m_MipLevels_;

     [[nodiscard]] bool IsValid() const;
+    [[nodiscard]] bool IsOwned() const;
+    [[nodiscard]] u32 GetMipLevels() const;

     void Destroy(const Device *device);

+    constexpr static u32 VALID_BIT = 1u << 31;
+    constexpr static u32 OWNED_BIT = 1u << 30;
+    constexpr static u32 MIP_MASK = ~(VALID_BIT | OWNED_BIT);
 };

 struct Texture : Image

@@ -35,5 +43,17 @@ struct DepthImage : Image
 inline bool
 Image::IsValid() const
 {
-    return m_Image;
+    return m_MipLevels_ & VALID_BIT;
 }
+
+inline bool
+Image::IsOwned() const
+{
+    return m_MipLevels_ & OWNED_BIT;
+}
+
+inline u32
+Image::GetMipLevels() const
+{
+    return m_MipLevels_ & MIP_MASK;
+}

Binary file not shown.
@@ -45,13 +45,15 @@ VectorToVec3(const std::vector<double> &vec)
 }

 TextureHandle
-ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, StagingBuffer *stagingBuffer,
+ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, StagingBuffer *stagingBuffer,
                        tinygltf::Image *image) const
 {
     assert(image->component == 4);

+    Texture texture;
+
     usize byteSize = image->image.size();
-    texture->Init(m_ResourceManager->m_Device, {.width = Cast<u32>(image->width), .height = Cast<u32>(image->height)},
+    texture.Init(m_ResourceManager->m_Device, {.width = Cast<u32>(image->width), .height = Cast<u32>(image->height)},
                  vk::Format::eR8G8B8A8Srgb, true, image->name.data());
     stagingBuffer->Init(m_ResourceManager->m_Device, byteSize);
     stagingBuffer->Write(m_ResourceManager->m_Device, 0, byteSize, image->image.data());
@@ -63,12 +65,12 @@ ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, Stagin
         .newLayout = vk::ImageLayout::eTransferDstOptimal,
         .srcQueueFamilyIndex = vk::QueueFamilyIgnored,
         .dstQueueFamilyIndex = vk::QueueFamilyIgnored,
-        .image = texture->m_Image,
+        .image = texture.m_Image,
         .subresourceRange =
             {
                 .aspectMask = vk::ImageAspectFlagBits::eColor,
                 .baseMipLevel = 0,
-                .levelCount = texture->m_MipLevels,
+                .levelCount = texture.GetMipLevels(),
                 .baseArrayLayer = 0,
                 .layerCount = 1,
             },
@@ -81,7 +83,7 @@ ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, Stagin
         .newLayout = vk::ImageLayout::eTransferSrcOptimal,
         .srcQueueFamilyIndex = vk::QueueFamilyIgnored,
         .dstQueueFamilyIndex = vk::QueueFamilyIgnored,
-        .image = texture->m_Image,
+        .image = texture.m_Image,
         .subresourceRange =
             {
                 .aspectMask = vk::ImageAspectFlagBits::eColor,
@@ -99,12 +101,12 @@ ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, Stagin
         .newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
         .srcQueueFamilyIndex = m_TransferQueueIndex,
         .dstQueueFamilyIndex = m_GraphicsQueueIndex,
-        .image = texture->m_Image,
+        .image = texture.m_Image,
         .subresourceRange =
             {
                 .aspectMask = vk::ImageAspectFlagBits::eColor,
                 .baseMipLevel = 0,
-                .levelCount = texture->m_MipLevels,
+                .levelCount = texture.GetMipLevels(),
                 .baseArrayLayer = 0,
                 .layerCount = 1,
             },
@@ -122,22 +124,24 @@ ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, Stagin
                 .layerCount = 1,
             },
         .imageOffset = {},
-        .imageExtent = texture->m_Extent,
+        .imageExtent = texture.m_Extent,
     };

     commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTopOfPipe, vk::PipelineStageFlagBits::eTransfer, {}, 0,
                                   nullptr, 0, nullptr, 1, &imageStartBarrier);
-    commandBuffer.copyBufferToImage(stagingBuffer->m_Buffer, texture->m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
+    commandBuffer.copyBufferToImage(stagingBuffer->m_Buffer, texture.m_Image, vk::ImageLayout::eTransferDstOptimal, 1,
                                     &imageCopy);
     commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eTransfer, {}, 0,
                                   nullptr, 0, nullptr, 1, &nextMipBarrier);

-    auto calcNextMip = [](i32 prev) { return eastl::max(prev / 2, 1); };
+    auto calcNextMip = [](i32 prev) {
+        return eastl::max(prev / 2, 1);
+    };

-    i32 prevMipWidth = Cast<i32>(texture->m_Extent.width);
-    i32 prevMipHeight = Cast<i32>(texture->m_Extent.height);
+    i32 prevMipWidth = Cast<i32>(texture.m_Extent.width);
+    i32 prevMipHeight = Cast<i32>(texture.m_Extent.height);

-    u32 maxPrevMip = texture->m_MipLevels - 1;
+    u32 maxPrevMip = texture.GetMipLevels() - 1;
     for (u32 prevMipLevel = 0; prevMipLevel < maxPrevMip; ++prevMipLevel)
     {
         i32 currentMipWidth = calcNextMip(prevMipWidth);
@@ -152,8 +156,7 @@ ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, Stagin
                     .baseArrayLayer = 0,
                     .layerCount = 1,
                 },
-            .srcOffsets =
-                std::array{
+            .srcOffsets = std::array{
                     vk::Offset3D{0, 0, 0},
                     vk::Offset3D{prevMipWidth, prevMipHeight, 1},
                 },
@@ -164,15 +167,14 @@ ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, Stagin
                     .baseArrayLayer = 0,
                     .layerCount = 1,
                 },
-            .dstOffsets =
-                std::array{
+            .dstOffsets = std::array{
                     vk::Offset3D{0, 0, 0},
                     vk::Offset3D{currentMipWidth, currentMipHeight, 1},
                 },
         };

         nextMipBarrier.subresourceRange.baseMipLevel = currentMipLevel;
-        commandBuffer.blitImage(texture->m_Image, vk::ImageLayout::eTransferSrcOptimal, texture->m_Image,
+        commandBuffer.blitImage(texture.m_Image, vk::ImageLayout::eTransferSrcOptimal, texture.m_Image,
                                 vk::ImageLayout::eTransferDstOptimal, 1, &blitRegion, vk::Filter::eLinear);
         commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eTransfer, {}, 0,
                                       nullptr, 0, nullptr, 1, &nextMipBarrier);
@@ -184,7 +186,7 @@ ModelLoader::LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, Stagin
     commandBuffer.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eFragmentShader, {},
                                   0, nullptr, 0, nullptr, 1, &imageReadyBarrier);

-    return m_ResourceManager->Commit(texture);
+    return m_ResourceManager->Commit(&texture);
 }

 Model
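
LoadImage can now hand the manager a stack-local Texture: Commit memcpys the trivially copyable struct into the manager's array and strips the local's OWNED_BIT, so the local can safely fall out of scope (a later Destroy on it bails on !IsOwned()). A reduced illustration of why that round-trip is safe, with hypothetical names standing in for the engine's types:

    // Sketch: committing a stack-local value (illustrative, not engine code).
    #include <cstdint>
    #include <cstring>
    #include <type_traits>
    #include <vector>

    struct Tex
    {
        uint64_t gpuHandle = 0;
        uint32_t state = 0;
        static constexpr uint32_t VALID = 1u << 31, OWNED = 1u << 30;
    };
    static_assert(std::is_trivially_copyable_v<Tex>); // memcpy into the pool is legal

    uint32_t Commit(std::vector<Tex> &pool, Tex *t)
    {
        pool.emplace_back();
        std::memcpy(&pool.back(), t, sizeof *t); // the pool now holds the owning copy
        t->state &= ~Tex::OWNED;                 // caller's copy can no longer free the GPU object
        return static_cast<uint32_t>(pool.size() - 1);
    }

    void Destroy(Tex &t)
    {
        if (!(t.state & Tex::VALID) || !(t.state & Tex::OWNED))
            return; // not ours to free: either dead or committed away
        // ... release gpuHandle here ...
        t.state = 0;
    }
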
@@ -225,7 +227,6 @@ ModelLoader::LoadModel(cstr path, cstr name, bool batched)
     }

     eastl::vector<StagingBuffer> stagingBuffers;
-    eastl::vector<Texture> textures;
     eastl::vector<TextureHandle> textureHandles;

     if (!model.images.empty())
@@ -233,15 +234,13 @@ ModelLoader::LoadModel(cstr path, cstr name, bool batched)
         u32 numImages = Cast<u32>(model.images.size());

         stagingBuffers.resize(numImages);
-        textures.resize(numImages);
         textureHandles.resize(numImages);

         auto stagingPtr = stagingBuffers.data();
-        auto texturePtr = textures.data();
         auto imagePtr = model.images.data();
         for (TextureHandle &handle : textureHandles)
         {
-            handle = LoadImage(m_CommandBuffer, texturePtr++, stagingPtr++, imagePtr++);
+            handle = LoadImage(m_CommandBuffer, stagingPtr++, imagePtr++);
         }
     }

@@ -466,6 +465,7 @@ ModelLoader::LoadModel(cstr path, cstr name, bool batched)
         }
     }

+#pragma region Staging / Transfer / Uploads
     StorageBuffer positionBuffer;
     positionBuffer.Init(pDevice, vertexPositions.size() * sizeof vertexPositions[0], false);
     BufferHandle positionBufferHandle = m_ResourceManager->Commit(&positionBuffer);
@@ -523,6 +523,8 @@ ModelLoader::LoadModel(cstr path, cstr name, bool batched)
         m_CommandBuffer.copyBuffer(indexStaging.m_Buffer, indexBuffer.m_Buffer, 1, &bufferCopy);
     }

+#pragma endregion
+
     AbortIfFailed(m_CommandBuffer.end());

     vk::SubmitInfo submitInfo = {
@@ -547,27 +549,19 @@ ModelLoader::LoadModel(cstr path, cstr name, bool batched)
         buffer.Destroy(pDevice);
     }

-    return Model{m_ResourceManager, std::move(textures), std::move(textureHandles), materialsBuffer,
-                 materialsHandle, positionBuffer, positionBufferHandle, normalBuffer,
-                 normalBufferHandle, texCoord0Buffer, texCoord0BufferHandle, indexBuffer,
-                 meshPrimitives};
+    return Model{m_ResourceManager, std::move(textureHandles),
+                 materialsHandle, positionBufferHandle, normalBufferHandle,
+                 texCoord0BufferHandle, indexBuffer, meshPrimitives};
 }

-Model::Model(RenderResourceManager *resourceManager, eastl::vector<Texture> &&textures,
-             eastl::vector<TextureHandle> &&textureHandles, const StorageBuffer &materialsBuffer,
-             BufferHandle materialsHandle, const StorageBuffer &vertexPosBuffer, BufferHandle vertexPosHandle,
-             const StorageBuffer &normalBuffer, BufferHandle normalHandle, const StorageBuffer &uv0Buffer,
+Model::Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles,
+             BufferHandle materialsHandle, BufferHandle vertexPosHandle, BufferHandle normalHandle,
              BufferHandle uv0Handle, const IndexBuffer &indexBuffer, const eastl::vector<MeshPrimitive> &meshPrimitives)
     : m_ResourceManager(resourceManager)
-    , m_Textures(textures)
-    , m_TextureHandles(textureHandles)
-    , m_MaterialsBuffer(materialsBuffer)
+    , m_TextureHandles(std::move(textureHandles))
     , m_MaterialsHandle(materialsHandle)
-    , m_VertexPositions(vertexPosBuffer)
     , m_VertexPositionHandle(vertexPosHandle)
-    , m_NormalVectors(normalBuffer)
     , m_NormalHandle(normalHandle)
-    , m_TexCoord0(uv0Buffer)
     , m_TexCoord0Handle(uv0Handle)
     , m_IndexBuffer(indexBuffer)
     , m_MeshPrimitives(meshPrimitives)
@@ -576,15 +570,10 @@ Model::Model(RenderResourceManager *resourceManager, eastl::vector<Texture> &&te

 Model::Model(Model &&other) noexcept
     : m_ResourceManager(Take(other.m_ResourceManager))
-    , m_Textures(std::move(other.m_Textures))
     , m_TextureHandles(std::move(other.m_TextureHandles))
-    , m_MaterialsBuffer(other.m_MaterialsBuffer)
     , m_MaterialsHandle(other.m_MaterialsHandle)
-    , m_VertexPositions(other.m_VertexPositions)
     , m_VertexPositionHandle(other.m_VertexPositionHandle)
-    , m_NormalVectors(other.m_NormalVectors)
     , m_NormalHandle(other.m_NormalHandle)
-    , m_TexCoord0(other.m_TexCoord0)
     , m_TexCoord0Handle(other.m_TexCoord0Handle)
     , m_IndexBuffer(other.m_IndexBuffer)
     , m_MeshPrimitives(std::move(other.m_MeshPrimitives))
@@ -597,15 +586,10 @@ Model::operator=(Model &&other) noexcept
     if (this == &other)
         return *this;
     m_ResourceManager = Take(other.m_ResourceManager);
-    m_Textures = std::move(other.m_Textures);
     m_TextureHandles = std::move(other.m_TextureHandles);
-    m_MaterialsBuffer = other.m_MaterialsBuffer;
     m_MaterialsHandle = other.m_MaterialsHandle;
-    m_VertexPositions = other.m_VertexPositions;
     m_VertexPositionHandle = other.m_VertexPositionHandle;
-    m_NormalVectors = other.m_NormalVectors;
     m_NormalHandle = other.m_NormalHandle;
-    m_TexCoord0 = other.m_TexCoord0;
     m_TexCoord0Handle = other.m_TexCoord0Handle;
     m_IndexBuffer = other.m_IndexBuffer;
     m_MeshPrimitives = std::move(other.m_MeshPrimitives);
@@ -617,11 +601,7 @@ Model::~Model()
     if (!m_ResourceManager)
         return;

-    m_VertexPositions.Destroy(m_ResourceManager->m_Device);
     m_IndexBuffer.Destroy(m_ResourceManager->m_Device);
-    m_NormalVectors.Destroy(m_ResourceManager->m_Device);
-    m_TexCoord0.Destroy(m_ResourceManager->m_Device);
-
     m_ResourceManager->Release(m_VertexPositionHandle);
     m_ResourceManager->Release(m_NormalHandle);
     m_ResourceManager->Release(m_TexCoord0Handle);
@@ -630,14 +610,9 @@ Model::~Model()
         m_ResourceManager->Release(handle);
     }
     m_ResourceManager->Release(m_MaterialsHandle);
-    for (Texture &texture : m_Textures)
-    {
-        texture.Destroy(m_ResourceManager->m_Device);
-    }
-    m_MaterialsBuffer.Destroy(m_ResourceManager->m_Device);
 }

-ModelLoader::ModelLoader(RenderResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
+ModelLoader::ModelLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
                          u32 graphicsQueueIndex)
     : m_ResourceManager(resourceManager)
     , m_TransferQueue(transferQueue)
@@ -44,31 +44,21 @@ struct Material

 struct Model
 {
-    RenderResourceManager *m_ResourceManager;
+    GpuResourceManager *m_ResourceManager;

-    eastl::vector<Texture> m_Textures;
     eastl::vector<TextureHandle> m_TextureHandles;

-    StorageBuffer m_MaterialsBuffer;
     BufferHandle m_MaterialsHandle;

-    StorageBuffer m_VertexPositions;
     BufferHandle m_VertexPositionHandle;

-    StorageBuffer m_NormalVectors;
     BufferHandle m_NormalHandle;

-    StorageBuffer m_TexCoord0;
     BufferHandle m_TexCoord0Handle;

     IndexBuffer m_IndexBuffer;
     eastl::vector<MeshPrimitive> m_MeshPrimitives;

-    Model(RenderResourceManager *resourceManager, eastl::vector<Texture> &&textures,
-          eastl::vector<TextureHandle> &&textureHandles, const StorageBuffer &materialsBuffer,
-          BufferHandle materialsHandle, const StorageBuffer &vertexPosBuffer, BufferHandle vertexPosHandle,
-          const StorageBuffer &normalBuffer, BufferHandle normalHandle, const StorageBuffer &uv0Buffer,
-          BufferHandle uv0Handle, const IndexBuffer &indexBuffer, const eastl::vector<MeshPrimitive> &meshPrimitives);
+    Model(GpuResourceManager *resourceManager, eastl::vector<TextureHandle> &&textureHandles,
+          BufferHandle materialsHandle, BufferHandle vertexPosHandle, BufferHandle normalHandle, BufferHandle uv0Handle,
+          const IndexBuffer &indexBuffer, const eastl::vector<MeshPrimitive> &meshPrimitives);

     Model(Model &&other) noexcept;
     Model &operator=(Model &&other) noexcept;
@@ -80,18 +70,18 @@ struct Model

 struct ModelLoader
 {
-    RenderResourceManager *const m_ResourceManager;
+    GpuResourceManager *const m_ResourceManager;
     vk::CommandPool m_CommandPool;
     vk::CommandBuffer m_CommandBuffer;
     vk::Queue m_TransferQueue;
     u32 m_TransferQueueIndex;
     u32 m_GraphicsQueueIndex;

-    ModelLoader(RenderResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
+    ModelLoader(GpuResourceManager *resourceManager, vk::Queue transferQueue, u32 transferQueueIndex,
                 u32 graphicsQueueIndex);
     ~ModelLoader();

-    TextureHandle LoadImage(vk::CommandBuffer commandBuffer, Texture *texture, StagingBuffer *stagingBuffer,
+    TextureHandle LoadImage(vk::CommandBuffer commandBuffer, StagingBuffer *stagingBuffer,
                             tinygltf::Image *image) const;
     Model LoadModel(cstr path, cstr name = nullptr, bool batched = false);
@@ -85,7 +85,7 @@ main(int, char **)
     Device device = {&context, &deviceToUse, &enabledDeviceFeatures, {queueAllocation}, "Primary Device"};
     vk::Queue commandQueue = device.GetQueue(queueAllocation.m_Family, 0);
     Swapchain swapchain = {&window, &device, "Primary Chain"};
-    RenderResourceManager resourceManager = {&device, 1000};
+    GpuResourceManager resourceManager = {&device, 1000};

     ModelLoader modelLoader = {&resourceManager, commandQueue, queueAllocation.m_Family, queueAllocation.m_Family};
@@ -13,7 +13,7 @@
 #include <EASTL/array.h>

 Pipeline
-CreatePipeline(const Device *device, const Swapchain *swapchain, const RenderResourceManager *resourceManager)
+CreatePipeline(const Device *device, const Swapchain *swapchain, const GpuResourceManager *resourceManager)
 {
     // Pipeline Setup
     auto vertexShaderModule = CreateShader(device, VERTEX_SHADER_FILE);
@@ -8,7 +8,7 @@
 #include "global.h"
 #include "pipeline.h"

-struct RenderResourceManager;
+struct GpuResourceManager;
 struct Swapchain;
 struct Device;
@@ -22,4 +22,4 @@ struct Vertex
 };

 vk::ShaderModule CreateShader(const Device *device, cstr shaderFile);
-Pipeline CreatePipeline(const Device *device, const Swapchain *swapchain, const RenderResourceManager *resourceManager);
+Pipeline CreatePipeline(const Device *device, const Swapchain *swapchain, const GpuResourceManager *resourceManager);
@@ -12,25 +12,184 @@

 #include <EASTL/array.h>

-RenderResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info)
+void
+TextureManager::Init(const u32 maxCapacity)
+{
+    m_MaxCapacity = maxCapacity;
+    m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
+}
+
+TextureHandle
+TextureManager::Commit(Texture *texture)
+{
+    ERROR_IF(!texture->IsValid() || !texture->IsOwned(), "Texture must be valid and owned for committal")
+    THEN_ABORT(-1);
+
+    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
+    {
+        const u32 index = m_FreeHead;
+
+        Texture *allocatedTexture = &m_Textures[index];
+
+        assert(!allocatedTexture->IsValid());
+        m_FreeHead = *Recast<u32 *>(allocatedTexture);
+
+        // Ensure it is copyable.
+        static_assert(std::is_trivially_copyable_v<Texture>);
+        memcpy(allocatedTexture, texture, sizeof *texture);
+
+        // Take ownership of the texture.
+        texture->m_MipLevels_ &= ~Texture::OWNED_BIT;
+
+        return {index};
+    }
+
+    const u32 index = Cast<u32>(m_Textures.size());
+    if (index < m_MaxCapacity)
+    {
+        Texture *allocatedTexture = &m_Textures.push_back();
+
+        // Ensure it is copyable.
+        static_assert(std::is_trivially_copyable_v<Texture>);
+        memcpy(allocatedTexture, texture, sizeof *texture);
+
+        texture->m_MipLevels_ &= ~Texture::OWNED_BIT;
+
+        return {index};
+    }
+
+    ERROR("Out of Textures") THEN_ABORT(-1);
+}
+
+Texture *
+TextureManager::Fetch(const TextureHandle handle)
+{
+    assert(!handle.IsInvalid());
+
+    return &m_Textures[handle.m_Index];
+}
+
+void
+TextureManager::Release(const Device *device, const TextureHandle handle)
+{
+    assert(!handle.IsInvalid());
+
+    Texture *allocatedTexture = &m_Textures[handle.m_Index];
+    allocatedTexture->Destroy(device);
+
+    assert(!allocatedTexture->IsValid());
+    *Recast<u32 *>(allocatedTexture) = m_FreeHead;
+
+    m_FreeHead = handle.m_Index;
+}
+
+void
+TextureManager::Destroy(const Device *device)
+{
+    for (auto &texture : m_Textures)
+    {
+        texture.Destroy(device);
+    }
+}
+
+void
+BufferManager::Init(const u32 maxCapacity)
+{
+    m_MaxCapacity = maxCapacity;
+    m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
+}
+
+BufferHandle
+BufferManager::Commit(StorageBuffer *buffer)
+{
+    ERROR_IF(!buffer->IsValid() || !buffer->IsOwned(), "Buffer must be valid and owned for committal") THEN_ABORT(-1);
+
+    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
+    {
+        const u32 index = m_FreeHead;
+
+        StorageBuffer *allocatedBuffer = &m_Buffers[index];
+
+        assert(!allocatedBuffer->IsValid());
+        m_FreeHead = *Recast<u32 *>(allocatedBuffer);
+
+        // Ensure it is copyable.
+        static_assert(std::is_trivially_copyable_v<StorageBuffer>);
+        memcpy(allocatedBuffer, buffer, sizeof *buffer);
+
+        // Take ownership of the buffer.
+        buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;
+
+        return {index};
+    }
+
+    const u32 index = Cast<u32>(m_Buffers.size());
+    if (index < m_MaxCapacity)
+    {
+        StorageBuffer *allocatedBuffer = &m_Buffers.push_back();
+
+        // Ensure it is copyable.
+        static_assert(std::is_trivially_copyable_v<StorageBuffer>);
+        memcpy(allocatedBuffer, buffer, sizeof *buffer);
+
+        buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;
+
+        return {index};
+    }
+
+    ERROR("Out of Buffers") THEN_ABORT(-1);
+}
+
+StorageBuffer *
+BufferManager::Fetch(const BufferHandle handle)
+{
+    assert(!handle.IsInvalid());
+
+    return &m_Buffers[handle.m_Index];
+}
+
+void
+BufferManager::Release(const Device *device, const BufferHandle handle)
+{
+    assert(!handle.IsInvalid());
+
+    StorageBuffer *allocatedBuffer = &m_Buffers[handle.m_Index];
+    allocatedBuffer->Destroy(device);
+
+    assert(!allocatedBuffer->IsValid());
+    *Recast<u32 *>(allocatedBuffer) = m_FreeHead;
+
+    m_FreeHead = handle.m_Index;
+}
+
+void
+BufferManager::Destroy(const Device *device)
+{
+    for (auto &buffer : m_Buffers)
+    {
+        buffer.Destroy(device);
+    }
+}
+
+GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info)
     : uBufferInfo(info)
 {
 }

-RenderResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info)
+GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info)
     : uImageInfo(info)
 {
 }

-RenderResourceManager::WriteInfo::WriteInfo(vk::BufferView info)
+GpuResourceManager::WriteInfo::WriteInfo(vk::BufferView info)
     : uBufferView(info)
 {
 }

 BufferHandle
-RenderResourceManager::Commit(const StorageBuffer *storageBuffer)
+GpuResourceManager::Commit(StorageBuffer *storageBuffer)
 {
-    const u32 handle = m_BufferFreeList.Alloc();
+    const BufferHandle handle = m_BufferManager.Commit(storageBuffer);

     m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
         .buffer = storageBuffer->m_Buffer,
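
Both managers use the trick the comments in buffer.h and image.h describe: a released slot's own storage is reused to hold the index of the next free slot, so the free list costs no extra memory beyond the pool itself. A stripped-down sketch of the scheme with illustrative types (not the engine's):

    // Sketch: intrusive free list over a vector of trivially copyable slots.
    #include <cstdint>
    #include <cstring>
    #include <vector>

    constexpr uint32_t INVALID = ~0u;

    struct Slot
    {
        uint64_t payload = 0;
    };

    struct Pool
    {
        std::vector<Slot> slots;
        uint32_t freeHead = INVALID;

        uint32_t Acquire(const Slot &s)
        {
            if (freeHead != INVALID)
            {
                const uint32_t index = freeHead;
                // A dead slot's first bytes hold the index of the next free slot.
                std::memcpy(&freeHead, &slots[index], sizeof freeHead);
                slots[index] = s;
                return index;
            }
            slots.push_back(s);
            return static_cast<uint32_t>(slots.size() - 1);
        }

        void Release(uint32_t index)
        {
            // Thread the freed slot onto the list, reusing its own storage.
            std::memcpy(&slots[index], &freeHead, sizeof freeHead);
            freeHead = index;
        }
    };

This is also why the buffer.h static_assert matters: the state word must live past the bytes the free list scribbles over, so a recycled slot still reads as invalid.
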
@@ -41,19 +200,23 @@ RenderResourceManager::Commit(const StorageBuffer *storageBuffer)
     m_Writes.push_back({
         .dstSet = m_DescriptorSet,
         .dstBinding = BUFFER_BINDING_INDEX,
-        .dstArrayElement = handle,
+        .dstArrayElement = handle.m_Index,
         .descriptorCount = 1,
         .descriptorType = vk::DescriptorType::eStorageBuffer,
         .pBufferInfo = &m_WriteInfos.back().uBufferInfo,
     });

-    m_WriteOwner.emplace_back(HandleType::eBuffer, handle);
+    m_WriteOwner.emplace_back(HandleType::eBuffer, handle.m_Index);

-    return {handle};
+#if !defined(NDEBUG)
+    ++m_CommitedBufferCount;
+#endif
+
+    return handle;
 }

 void
-RenderResourceManager::EraseWrites(u32 handleIndex, HandleType handleType)
+GpuResourceManager::EraseWrites(u32 handleIndex, HandleType handleType)
 {
     auto writeIter = m_Writes.begin();
     auto ownerIter = m_WriteOwner.begin();
@@ -76,31 +239,39 @@ RenderResourceManager::EraseWrites(u32 handleIndex, HandleType handleType)
 }

 void
-RenderResourceManager::Release(BufferHandle handle)
+GpuResourceManager::Release(BufferHandle handle)
 {
     if (handle.IsInvalid())
         return;

     EraseWrites(handle.m_Index, HandleType::eBuffer);

-    m_BufferFreeList.Free(handle.m_Index);
+    m_BufferManager.Release(m_Device, handle);
+
+#if !defined(NDEBUG)
+    --m_CommitedBufferCount;
+#endif
 }

 void
-RenderResourceManager::Release(TextureHandle handle)
+GpuResourceManager::Release(TextureHandle handle)
 {
     if (handle.IsInvalid())
         return;

     EraseWrites(handle.m_Index, HandleType::eTexture);

-    m_TextureFreeList.Free(handle.m_Index);
+    m_TextureManager.Release(m_Device, handle);
+
+#if !defined(NDEBUG)
+    --m_CommitedTextureCount;
+#endif
 }

 TextureHandle
-RenderResourceManager::Commit(const Texture *texture)
+GpuResourceManager::Commit(Texture *texture)
 {
-    const u32 handle = m_TextureFreeList.Alloc();
+    TextureHandle handle = m_TextureManager.Commit(texture);

     m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
         .sampler = nullptr,
@@ -111,19 +282,23 @@ RenderResourceManager::Commit(const Texture *texture)
     m_Writes.push_back({
         .dstSet = m_DescriptorSet,
         .dstBinding = TEXTURE_BINDING_INDEX,
-        .dstArrayElement = handle,
+        .dstArrayElement = handle.m_Index,
         .descriptorCount = 1,
         .descriptorType = vk::DescriptorType::eCombinedImageSampler,
         .pImageInfo = &m_WriteInfos.back().uImageInfo,
     });

-    m_WriteOwner.emplace_back(HandleType::eBuffer, handle);
+    m_WriteOwner.emplace_back(HandleType::eTexture, handle.m_Index);

+#if !defined(NDEBUG)
+    ++m_CommitedTextureCount;
+#endif
+
     return {handle};
 }

 void
-RenderResourceManager::Update()
+GpuResourceManager::Update()
 {
     if (m_Writes.empty() || m_WriteInfos.empty())
         return;
@@ -135,7 +310,7 @@ RenderResourceManager::Update()
     m_WriteOwner.clear();
 }

-RenderResourceManager::RenderResourceManager(const Device *device, u16 maxSize)
+GpuResourceManager::GpuResourceManager(const Device *device, u16 maxSize)
     : m_Device(device)
 {
     vk::PhysicalDeviceProperties properties;
@@ -166,8 +341,8 @@ RenderResourceManager::RenderResourceManager(const Device *device, u16 maxSize)
     INFO("Max Buffer Count: {}", buffersCount);
     INFO("Max Texture Count: {}", texturesCount);

-    m_BufferFreeList.Init(buffersCount);
-    m_TextureFreeList.Init(texturesCount);
+    m_BufferManager.Init(buffersCount);
+    m_TextureManager.Init(texturesCount);

     eastl::array poolSizes = {
         vk::DescriptorPoolSize{
@@ -225,9 +400,54 @@ RenderResourceManager::RenderResourceManager(const Device *device, u16 maxSize)
     m_Device->SetName(m_DescriptorSet, "Bindless Set");
 }

-RenderResourceManager::~RenderResourceManager()
+GpuResourceManager::~GpuResourceManager()
 {
+#if !defined(NDEBUG)
+    WARN_IF(m_CommitedBufferCount > 0 || m_CommitedTextureCount > 0, "Resources alive: SSBO = {}, Textures = {}",
+            m_CommitedBufferCount, m_CommitedTextureCount);
+#endif
+
+    m_BufferManager.Destroy(m_Device);
+    m_TextureManager.Destroy(m_Device);
     m_Device->m_Device.destroy(m_Sampler, nullptr);
     m_Device->m_Device.destroy(m_DescriptorPool, nullptr);
     m_Device->m_Device.destroy(m_SetLayout, nullptr);
 }
+
+GpuResourceManager::GpuResourceManager(GpuResourceManager &&other) noexcept
+    : m_WriteInfos(std::move(other.m_WriteInfos))
+    , m_Writes(std::move(other.m_Writes))
+    , m_WriteOwner(std::move(other.m_WriteOwner))
+    , m_Sampler(other.m_Sampler)
+    , m_BufferManager(std::move(other.m_BufferManager))
+    , m_TextureManager(std::move(other.m_TextureManager))
+    , m_Device(Take(other.m_Device))
+    , m_DescriptorPool(other.m_DescriptorPool)
+    , m_SetLayout(other.m_SetLayout)
+    , m_DescriptorSet(other.m_DescriptorSet)
+    , m_CommitedBufferCount(other.m_CommitedBufferCount)
+    , m_CommitedTextureCount(other.m_CommitedTextureCount)
+{
+    assert(!other.m_Device);
+}
+
+GpuResourceManager &
+GpuResourceManager::operator=(GpuResourceManager &&other) noexcept
+{
+    if (this == &other)
+        return *this;
+    m_WriteInfos = std::move(other.m_WriteInfos);
+    m_Writes = std::move(other.m_Writes);
+    m_WriteOwner = std::move(other.m_WriteOwner);
+    m_Sampler = other.m_Sampler;
+    m_BufferManager = std::move(other.m_BufferManager);
+    m_TextureManager = std::move(other.m_TextureManager);
+    m_Device = Take(other.m_Device); // Ensure taken.
+    m_DescriptorPool = other.m_DescriptorPool;
+    m_SetLayout = other.m_SetLayout;
+    m_DescriptorSet = other.m_DescriptorSet;
+    m_CommitedBufferCount = other.m_CommitedBufferCount;
+    m_CommitedTextureCount = other.m_CommitedTextureCount;

+    assert(!other.m_Device);
+    return *this;
+}
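
The move operations lean on Take to null out the source's device pointer, which is what the trailing asserts verify; a moved-from manager then fails fast on any m_Device-dependent path. Take itself isn't shown in this commit; a plausible definition, assuming it behaves like std::exchange with a default value:

    // Sketch: a plausible Take, assumed equivalent to std::exchange(value, {}).
    #include <utility>

    template <typename T>
    T Take(T &value)
    {
        return std::exchange(value, T{}); // return the old value, reset the source
    }
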
@@ -5,8 +5,9 @@

 #pragma once

-#include "buffer.h"
 #include "global.h"
+#include "buffer.h"
+#include "image.h"

 #include <EASTL/deque.h>
 #include <EASTL/stack.h>
@@ -16,7 +17,7 @@ struct Device;
 struct Texture;
 struct UniformStorageBuffer;

-struct RenderResourceManager;
+struct GpuResourceManager;

 struct GpuResourceHandle
 {
@@ -38,54 +39,46 @@ struct TextureHandle : GpuResourceHandle
 {
 };

-struct FreeList
+struct TextureManager
 {
-    eastl::stack<u32, eastl::deque<u32>> m_List;
-    u32 m_MaxVisited = 0;
-    u32 m_MaxCapacity = 16;
-
-    void
-    Init(u32 maxCapacity)
-    {
-        // MaxValue<u32> is 'invalid-handle' so you can't use it as a handle.
-        assert(maxCapacity < GpuResourceHandle::INVALID_HANDLE);
-        m_MaxCapacity = maxCapacity;
-    }
-
-    [[nodiscard]] u32
-    Alloc()
-    {
-        if (!m_List.empty())
-        {
-            const u32 value = m_List.top();
-            m_List.pop();
-            return value;
-        }
-        if (m_MaxVisited < m_MaxCapacity)
-        {
-            return m_MaxVisited++;
-        }
-        ERROR("Out of Handles.") THEN_ABORT(-1);
-    }
-
-    void
-    Free(u32 index)
-    {
-        WARN_IF(index >= m_MaxCapacity, "Trying to free an out-of-bounds index.");
-
-        if (index < m_MaxCapacity)
-            m_List.push(index);
-    }
+    eastl::vector<Texture> m_Textures;
+    u32 m_MaxCapacity;
+    u32 m_FreeHead;
+
+    void Init(u32 maxCapacity);
+    TextureHandle Commit(Texture *texture);
+    Texture *Fetch(TextureHandle handle);
+    void Release(const Device *device, TextureHandle handle);
+    void Destroy(const Device *device);
 };

-struct RenderResourceManager
+struct BufferManager
 {
-private:
-    union WriteInfo {
+    eastl::vector<StorageBuffer> m_Buffers;
+    u32 m_MaxCapacity;
+    u32 m_FreeHead;
+
+    void Init(u32 maxCapacity);
+    BufferHandle Commit(StorageBuffer *buffer);
+    StorageBuffer *Fetch(BufferHandle handle);
+    void Release(const Device *device, BufferHandle handle);
+    void Destroy(const Device *device);
+};
+
+struct GpuResourceManager
+{
+private:
+    union WriteInfo
+    {
         vk::DescriptorBufferInfo uBufferInfo;
         vk::DescriptorImageInfo uImageInfo;
         vk::BufferView uBufferView;

         WriteInfo()
         {
         }

         explicit WriteInfo(vk::DescriptorBufferInfo info);
         explicit WriteInfo(vk::DescriptorImageInfo info);
         explicit WriteInfo(vk::BufferView info);
@@ -105,12 +98,13 @@ struct RenderResourceManager

     vk::Sampler m_Sampler;

-    FreeList m_BufferFreeList;
-    FreeList m_TextureFreeList;
+    BufferManager m_BufferManager;
+    TextureManager m_TextureManager;

     void EraseWrites(u32 handleIndex, HandleType handleType);

 public:
     const Device *m_Device;

     constexpr static u32 BUFFER_BINDING_INDEX = 0;
@@ -120,16 +114,24 @@ struct RenderResourceManager
     vk::DescriptorSetLayout m_SetLayout;
     vk::DescriptorSet m_DescriptorSet;

-    BufferHandle Commit(const StorageBuffer *storageBuffer);
+    BufferHandle Commit(StorageBuffer *storageBuffer);
     void Release(BufferHandle handle);
-    TextureHandle Commit(const Texture *texture);
+    TextureHandle Commit(Texture *texture);
     void Release(TextureHandle handle);

     void Update();

     // Ctor/Dtor
-    RenderResourceManager(const Device *device, u16 maxSize);
-    ~RenderResourceManager();
+    GpuResourceManager(const Device *device, u16 maxSize);
+    ~GpuResourceManager();

-    DISALLOW_COPY_AND_ASSIGN(RenderResourceManager);
+    GpuResourceManager(GpuResourceManager &&other) noexcept;
+    GpuResourceManager &operator=(GpuResourceManager &&other) noexcept;
+
+#if !defined(NDEBUG)
+    usize m_CommitedBufferCount = 0;
+    usize m_CommitedTextureCount = 0;
+#endif
+
+    DISALLOW_COPY_AND_ASSIGN(GpuResourceManager);
 };
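
Taken together, the new API makes the manager the single owner of committed resources. A typical lifecycle, sketched from the calls this commit introduces (error handling elided; `device`, `byteSize`, and `resourceManager` are stand-ins, not verbatim engine code):

    // Sketch of the intended lifecycle.
    StorageBuffer positions;
    positions.Init(device, byteSize, false);        // buffer starts valid + owned

    BufferHandle handle = resourceManager.Commit(&positions); // manager copies it, takes ownership
    positions.Destroy(device);                      // safe no-op: the local copy is no longer owned

    resourceManager.Update();                       // flush the pending descriptor writes

    // ... draw, indexing the bindless set with handle.m_Index ...

    resourceManager.Release(handle);                // destroys the GPU buffer, recycles the slot
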