// =============================================
// Aster: render_resource_manager.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================

#include "render_resource_manager.h"

#include "buffer.h"
#include "device.h"
#include "helpers.h"
#include "image.h"

#include <EASTL/array.h>

void
TextureManager::Init(const u32 maxCapacity)
{
    m_MaxCapacity = maxCapacity;
    m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}

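// Committed resources live in a dense slot array. A released slot is invalidated and its
// first 4 bytes are reinterpreted (via Recast<u32 *>) as the index of the next free slot,
// forming an intrusive free list headed by m_FreeHead. Commit() pops from this list before
// growing the array, so handles stay stable for the lifetime of the resource.
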
TextureHandle
TextureManager::Commit(Texture *texture)
{
    ERROR_IF(!texture || !texture->IsValid(), "Texture must be valid for committal")
    THEN_ABORT(-1);

    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
    {
        const u32 index = m_FreeHead;

        Texture *allocatedTexture = &m_Textures[index];

        assert(!allocatedTexture->IsValid());
        m_FreeHead = *Recast<u32 *>(allocatedTexture);

        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<Texture>);
        *allocatedTexture = *texture;

        // The manager takes ownership; strip the ownership bit from the caller's copy.
        texture->m_Flags_ &= ~Texture::OWNED_BIT;

        return {index};
    }

    const u32 index = Cast<u32>(m_Textures.size());
    if (index < m_MaxCapacity)
    {
        Texture *allocatedTexture = &m_Textures.push_back();

        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<Texture>);
        *allocatedTexture = *texture;

        texture->m_Flags_ &= ~Texture::OWNED_BIT;

        return {index};
    }

    ERROR("Out of Textures") THEN_ABORT(-1);
}

Texture *
TextureManager::Fetch(const TextureHandle handle)
{
    assert(!handle.IsInvalid());

    return &m_Textures[handle.m_Index];
}

void
TextureManager::Release(const Device *device, const TextureHandle handle)
{
    assert(!handle.IsInvalid());

    Texture *allocatedTexture = &m_Textures[handle.m_Index];
    allocatedTexture->Destroy(device);

    assert(!allocatedTexture->IsValid());
    *Recast<u32 *>(allocatedTexture) = m_FreeHead;

    m_FreeHead = handle.m_Index;
}

void
TextureManager::Destroy(const Device *device)
{
    for (auto &texture : m_Textures)
    {
        texture.Destroy(device);
    }
}

void
BufferManager::Init(const u32 maxCapacity)
{
    m_MaxCapacity = maxCapacity;
    m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}

BufferHandle
BufferManager::Commit(StorageBuffer *buffer)
{
    ERROR_IF(!buffer || !buffer->IsValid() || !buffer->IsOwned(), "Buffer must be valid and owned for committal")
    THEN_ABORT(-1);

    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
    {
        const u32 index = m_FreeHead;

        StorageBuffer *allocatedBuffer = &m_Buffers[index];

        assert(!allocatedBuffer->IsValid());
        m_FreeHead = *Recast<u32 *>(allocatedBuffer);

        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<StorageBuffer>);
        *allocatedBuffer = *buffer;

        // The manager takes ownership; strip the ownership bit from the caller's copy.
        buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;

        return {index};
    }

    const u32 index = Cast<u32>(m_Buffers.size());
    if (index < m_MaxCapacity)
    {
        StorageBuffer *allocatedBuffer = &m_Buffers.push_back();

        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<StorageBuffer>);
        *allocatedBuffer = *buffer;

        buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;

        return {index};
    }

    ERROR("Out of Buffers") THEN_ABORT(-1);
}

StorageBuffer *
BufferManager::Fetch(const BufferHandle handle)
{
    assert(!handle.IsInvalid());

    return &m_Buffers[handle.m_Index];
}

void
BufferManager::Release(const Device *device, const BufferHandle handle)
{
    assert(!handle.IsInvalid());

    StorageBuffer *allocatedBuffer = &m_Buffers[handle.m_Index];
    allocatedBuffer->Destroy(device);

    assert(!allocatedBuffer->IsValid());
    *Recast<u32 *>(allocatedBuffer) = m_FreeHead;

    m_FreeHead = handle.m_Index;
}

void
BufferManager::Destroy(const Device *device)
{
    for (auto &buffer : m_Buffers)
    {
        buffer.Destroy(device);
    }
}

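// StorageTextureManager reuses TextureManager's slot storage and free list; it only narrows the
// handle and element types. Fetch() Recasts the stored Texture to StorageTexture, which assumes
// the two types are layout-compatible (i.e. StorageTexture adds no data members of its own).
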
StorageTextureHandle
StorageTextureManager::Commit(StorageTexture *texture)
{
    const TextureHandle tx = TextureManager::Commit(texture);
    return {tx.m_Index};
}

StorageTexture *
StorageTextureManager::Fetch(const StorageTextureHandle handle)
{
    assert(!handle.IsInvalid());

    return Recast<StorageTexture *>(&m_Textures[handle.m_Index]);
}

void
StorageTextureManager::Release(const Device *device, const StorageTextureHandle handle)
{
    TextureManager::Release(device, {handle.m_Index});
}

usize
HashSamplerCreateInfo(const vk::SamplerCreateInfo *createInfo)
{
    usize hash = HashAny(createInfo->flags);
    hash = HashCombine(hash, HashAny(createInfo->magFilter));
    hash = HashCombine(hash, HashAny(createInfo->minFilter));
    hash = HashCombine(hash, HashAny(createInfo->mipmapMode));
    hash = HashCombine(hash, HashAny(createInfo->addressModeU));
    hash = HashCombine(hash, HashAny(createInfo->addressModeV));
    hash = HashCombine(hash, HashAny(createInfo->addressModeW));
    hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->mipLodBias * 1000)));    // Resolution of 10^-3
    hash = HashCombine(hash, HashAny(createInfo->anisotropyEnable));
    hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->maxAnisotropy * 0x10))); // 16:1 anisotropy is enough resolution
    hash = HashCombine(hash, HashAny(createInfo->compareEnable));
    hash = HashCombine(hash, HashAny(createInfo->compareOp));
    hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->minLod * 1000)));        // 0.001 resolution is enough.
    hash = HashCombine(hash, HashAny(Cast<usize>(createInfo->maxLod * 1000)));        // 0.001 resolution is enough. (1 == NO Clamp)
    hash = HashCombine(hash, HashAny(createInfo->borderColor));
    hash = HashCombine(hash, HashAny(createInfo->unnormalizedCoordinates));

    return hash;
}

void
SamplerManager::Init(usize size)
{
    m_Samplers.reserve(size);
    m_SamplerHashes.reserve(size);
}

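// Create() deduplicates samplers by the quantized hash of their create info: if a matching
// sampler (to within the hashing resolution above) already exists, its handle is returned
// instead of creating a new vk::Sampler. Only the hash is compared, so a hash collision would
// alias two distinct sampler descriptions.
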
SamplerHandle
SamplerManager::Create(const Device *device, const vk::SamplerCreateInfo *createInfo)
{
    const usize hash = HashSamplerCreateInfo(createInfo);

    for (u32 index = 0; usize samplerHash : m_SamplerHashes)
    {
        if (samplerHash == hash)
        {
            return {index};
        }
        ++index;
    }

    vk::Sampler sampler;
    AbortIfFailed(device->m_Device.createSampler(createInfo, nullptr, &sampler));
    const u32 index = Cast<u32>(m_SamplerHashes.size());
    m_SamplerHashes.push_back(hash);
    m_Samplers.push_back(sampler);
    return {index};
}

vk::Sampler
SamplerManager::Fetch(const SamplerHandle handle)
{
    assert(!handle.IsInvalid());

    return m_Samplers[handle.m_Index];
}

void
SamplerManager::Destroy(const Device *device)
{
    for (const auto &sampler : m_Samplers)
    {
        device->m_Device.destroy(sampler, nullptr);
    }
    m_Samplers.clear();
    m_SamplerHashes.clear();
}

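// VirtualizedBufferPool sub-allocates ranges out of a single large backing StorageBuffer using
// VMA's virtual allocator (vmaVirtualAllocate on a VmaVirtualBlock), so the GPU sees one buffer
// while callers work with lightweight handles into it. m_BufferPtr caches the buffer's device
// address so sub-allocations can be addressed via buffer device address. Writes go through the
// persistently mapped pointer, which currently assumes ReBAR-style host-visible device memory
// (see the assert in Write()).
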
void
VirtualizedBufferPool::InitStorage(const Device *device, usize bufferMaxSize)
{
    auto buffer = std::make_unique<StorageBuffer>();
    buffer->Init(device, bufferMaxSize, true, true, "Unified Geometry Buffer");
    m_BackingBuffer = std::move(buffer);

    vk::BufferDeviceAddressInfo addressInfo = {
        .buffer = m_BackingBuffer->m_Buffer,
    };
    m_BufferPtr = device->m_Device.getBufferAddress(&addressInfo);

    const VmaVirtualBlockCreateInfo virtualBlockCreateInfo = {
        .size = bufferMaxSize,
    };
    AbortIfFailed(Cast<vk::Result>(vmaCreateVirtualBlock(&virtualBlockCreateInfo, &m_Block)));
}

void
VirtualizedBufferPool::InitIndex(const Device *device, usize bufferMaxSize)
{
    auto buffer = std::make_unique<StorageIndexBuffer>();
    buffer->Init(device, bufferMaxSize, true, true, "Unified Index Buffer");
    m_BackingBuffer = std::move(buffer);

    vk::BufferDeviceAddressInfo addressInfo = {
        .buffer = m_BackingBuffer->m_Buffer,
    };
    m_BufferPtr = device->m_Device.getBufferAddress(&addressInfo);

    const VmaVirtualBlockCreateInfo virtualBlockCreateInfo = {
        .size = bufferMaxSize,
    };
    AbortIfFailed(Cast<vk::Result>(vmaCreateVirtualBlock(&virtualBlockCreateInfo, &m_Block)));
}

void
VirtualizedBufferPool::UpdateToGpu(const Device *device)
{
    // Not required until non-ReBAR support is added; with ReBAR the pool writes directly into
    // mapped device memory, so there is nothing to flush here.
}

VirtualizedBufferHandle
VirtualizedBufferPool::Create(usize size, usize alignment)
{
    const VmaVirtualAllocationCreateInfo virtualAllocationCreateInfo = {
        .size = size,
        .alignment = alignment,
    };
    VmaVirtualAllocation allocation;
    usize offset;
    AbortIfFailed(vmaVirtualAllocate(m_Block, &virtualAllocationCreateInfo, &allocation, &offset));
    const VirtualBuffer virtualBuffer = {
        .m_Allocation = allocation,
        .m_Offset = offset,
        .m_Size = size,
    };

    u32 index;
    VirtualBuffer *allocVBuf;
    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
    {
        index = m_FreeHead;
        allocVBuf = &m_VirtualBuffers[index];

        m_FreeHead = *Recast<u32 *>(allocVBuf);
    }
    else
    {
        index = Cast<u32>(m_VirtualBuffers.size());
        allocVBuf = &m_VirtualBuffers.push_back();
    }

    // Ensure it is copyable.
    static_assert(std::is_trivially_copyable_v<VirtualBuffer>);
    *allocVBuf = virtualBuffer;
    m_Dirty = true;

    return {index};
}

uptr
VirtualizedBufferPool::FetchOffset(VirtualizedBufferHandle handle)
{
    assert(!handle.IsInvalid());
    return m_VirtualBuffers[handle.m_Index].m_Offset;
}

void
VirtualizedBufferPool::Release(VirtualizedBufferHandle handle)
{
    assert(!handle.IsInvalid());

    VirtualBuffer *virtualBuffer = &m_VirtualBuffers[handle.m_Index];
    vmaVirtualFree(m_Block, virtualBuffer->m_Allocation);

    *Recast<u32 *>(virtualBuffer) = m_FreeHead;

    m_FreeHead = handle.m_Index;
}

void
VirtualizedBufferPool::Write(VirtualizedBufferHandle handle, usize offset, usize size, const void *data)
{
    if (handle.IsInvalid())
        return;

    assert(m_BackingBuffer->IsMapped() && "Non-ReBAR is not supported.");

    const VirtualBuffer *virtualBuffer = &m_VirtualBuffers[handle.m_Index];
    assert(offset + size <= virtualBuffer->m_Size);

    u8 *target = m_BackingBuffer->m_Mapped + virtualBuffer->m_Offset + offset;
    memcpy(target, data, size);
}

void
VirtualizedBufferPool::Destroy(const Device *device)
{
    m_BackingBuffer->Destroy(device);
    m_BackingBuffer.reset();
}

RenderResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info)
    : uBufferInfo(info)
{
}

RenderResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info)
    : uImageInfo(info)
{
}

RenderResourceManager::WriteInfo::WriteInfo(vk::BufferView info)
    : uBufferView(info)
{
}

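// Commit*() records a vk::WriteDescriptorSet whose pBufferInfo/pImageInfo points into
// m_WriteInfos; both lists are flushed together in Update(). This relies on those pointers
// staying valid until the flush, i.e. on m_WriteInfos not relocating its elements while writes
// are pending (e.g. a deque-like container or sufficient reserved capacity). The container type
// is declared in the header, so this is an assumption noted here rather than verified.
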
BufferHandle
RenderResourceManager::Commit(StorageBuffer *storageBuffer)
{
    const BufferHandle handle = m_BufferManager.Commit(storageBuffer);

    m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
        .buffer = storageBuffer->m_Buffer,
        .offset = 0,
        .range = storageBuffer->GetSize(),
    });

    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = BUFFER_BINDING_INDEX,
        .dstArrayElement = handle.m_Index,
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eStorageBuffer,
        .pBufferInfo = &m_WriteInfos.back().uBufferInfo,
    });

    m_WriteOwner.emplace_back(HandleType::eBuffer, handle.m_Index);

#if !defined(ASTER_NDEBUG)
    ++m_CommitedBufferCount;
#endif

    return handle;
}

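// The slot index stored in the returned handle doubles as the descriptor array element
// (dstArrayElement above), so shader-side access is a plain index into the bindless array,
// e.g. (hypothetical GLSL, binding and block names are illustrative only):
//   layout(set = 0, binding = BUFFER_BINDING_INDEX) buffer Buffers { ... } uBuffers[];
//   ... uBuffers[nonuniformEXT(handleIndex)] ...
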
void
RenderResourceManager::Write(const BufferHandle handle, const usize offset, const usize size, const void *data)
{
    m_BufferManager.Fetch(handle)->Write(m_Device, offset, size, data);
}

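// Removes the pending (not yet flushed) descriptor write owned by the given handle, if any, by
// swapping it with the last entry. At most one write is erased per call, which matches the one
// write recorded per Commit*() call.
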
void
RenderResourceManager::EraseWrites(u32 handleIndex, HandleType handleType)
{
    auto writeIter = m_Writes.begin();
    auto ownerIter = m_WriteOwner.begin();
    const auto ownerEnd = m_WriteOwner.end();

    while (ownerIter != ownerEnd)
    {
        if (ownerIter->first == handleType && ownerIter->second == handleIndex)
        {
            *writeIter = m_Writes.back();
            *ownerIter = m_WriteOwner.back();
            m_Writes.pop_back();
            m_WriteOwner.pop_back();
            return;
        }

        ++ownerIter;
        ++writeIter;
    }
}

void
RenderResourceManager::Release(BufferHandle handle)
{
    if (handle.IsInvalid())
        return;

    EraseWrites(handle.m_Index, HandleType::eBuffer);

    m_BufferManager.Release(m_Device, handle);

#if !defined(ASTER_NDEBUG)
    --m_CommitedBufferCount;
#endif
}

void
RenderResourceManager::Release(StorageBuffer *storageBuffer, const BufferHandle handle)
{
    assert(storageBuffer);
    assert(!storageBuffer->IsValid());

    StorageBuffer *internal = m_BufferManager.Fetch(handle);
    *storageBuffer = *internal;
    internal->m_Size_ &= ~StorageBuffer::OWNED_BIT;

    Release(handle);
}

void
RenderResourceManager::Release(TextureHandle handle)
{
    if (handle.IsInvalid())
        return;

    EraseWrites(handle.m_Index, HandleType::eTexture);

    m_TextureManager.Release(m_Device, handle);

#if !defined(ASTER_NDEBUG)
    --m_CommitedTextureCount;
#endif
}

void
RenderResourceManager::Release(Texture *texture, TextureHandle handle)
{
    assert(texture);
    assert(!texture->IsValid());

    Texture *internal = m_TextureManager.Fetch(handle);
    *texture = *internal;
    internal->m_Flags_ &= ~Texture::OWNED_BIT;

    Release(handle);
}

TextureHandle
RenderResourceManager::CommitTexture(Texture *texture, const SamplerHandle sampler)
{
    TextureHandle handle = m_TextureManager.Commit(texture);

    const vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler);

    m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
        .sampler = samplerImpl,
        .imageView = texture->m_View,
        .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
    });

    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = TEXTURE_BINDING_INDEX,
        .dstArrayElement = handle.m_Index,
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eCombinedImageSampler,
        .pImageInfo = &m_WriteInfos.back().uImageInfo,
    });

    m_WriteOwner.emplace_back(HandleType::eTexture, handle.m_Index);

#if !defined(ASTER_NDEBUG)
    ++m_CommitedTextureCount;
#endif

    return {handle};
}

StorageTextureHandle
RenderResourceManager::CommitStorageTexture(StorageTexture *storageTexture, SamplerHandle sampler)
{
    StorageTextureHandle handle = m_StorageTextureManager.Commit(storageTexture);

    vk::Sampler samplerImpl = sampler.IsInvalid() ? m_DefaultSampler : m_SamplerManager.Fetch(sampler);

    m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
        .sampler = samplerImpl,
        .imageView = storageTexture->m_View,
        .imageLayout = vk::ImageLayout::eGeneral,
    });

    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = STORAGE_TEXTURE_BINDING_INDEX,
        .dstArrayElement = handle.m_Index,
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eStorageImage,
        .pImageInfo = &m_WriteInfos.back().uImageInfo,
    });

    m_WriteOwner.emplace_back(HandleType::eStorageTexture, handle.m_Index);

#if !defined(ASTER_NDEBUG)
    ++m_CommitedStorageTextureCount;
#endif

    return {handle};
}

void
RenderResourceManager::Release(StorageTextureHandle handle)
{
    if (handle.IsInvalid())
        return;

    // Storage textures register their writes under eStorageTexture (see CommitStorageTexture).
    EraseWrites(handle.m_Index, HandleType::eStorageTexture);

    m_StorageTextureManager.Release(m_Device, handle);

#if !defined(ASTER_NDEBUG)
    --m_CommitedStorageTextureCount;
#endif
}

void
RenderResourceManager::Release(StorageTexture *texture, const StorageTextureHandle handle)
{
    assert(texture);
    assert(!texture->IsValid());

    StorageTexture *internal = m_StorageTextureManager.Fetch(handle);
    *texture = *internal;
    internal->m_Flags_ &= ~StorageTexture::OWNED_BIT;

    Release(handle);
}

void
RenderResourceManager::Update()
{
    // Descriptor updates
    if (!m_Writes.empty())
    {
        m_Device->m_Device.updateDescriptorSets(Cast<u32>(m_Writes.size()), m_Writes.data(), 0, nullptr);

        m_Writes.clear();
        m_WriteInfos.clear();
        m_WriteOwner.clear();
    }

    // Sub-system updates
    m_Geometry.UpdateToGpu(m_Device);
}

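// The constructor sets up the bindless descriptor environment: one update-after-bind,
// partially-bound descriptor set with an array binding per resource class (storage buffers,
// combined image samplers, storage images). Array sizes are clamped to the per-stage device
// limits minus 1024, presumably to leave headroom for non-bindless descriptors used elsewhere;
// the exact margin is the author's choice, not something required by Vulkan.
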
RenderResourceManager::RenderResourceManager(Device *device, u16 maxSize, bool useBufferAddress)
    : m_Device(device)
    , m_UseBufferAddr(useBufferAddress)
{
    vk::PhysicalDeviceProperties properties;
    m_Device->m_PhysicalDevice.getProperties(&properties);

    u32 buffersCount = eastl::min(properties.limits.maxPerStageDescriptorStorageBuffers - 1024, Cast<u32>(maxSize));
    u32 texturesCount = eastl::min(properties.limits.maxPerStageDescriptorSampledImages - 1024, Cast<u32>(maxSize));
    u32 storageTexturesCount =
        eastl::min(properties.limits.maxPerStageDescriptorStorageImages - 1024, Cast<u32>(maxSize));

    INFO("Max Buffer Count: {}", buffersCount);
    INFO("Max Texture Count: {}", texturesCount);
    INFO("Max Storage Texture Count: {}", storageTexturesCount);

    m_Geometry.InitStorage(device, Megabyte(128u));
    m_Index.InitIndex(device, Megabyte(8u));
    m_Material.InitStorage(device, Kilobyte(560u));
    m_BufferManager.Init(buffersCount);
    m_TextureManager.Init(texturesCount);
    m_StorageTextureManager.Init(storageTexturesCount);
    m_SamplerManager.Init(storageTexturesCount);

    m_DefaultSamplerCreateInfo = {
        .magFilter = vk::Filter::eLinear,
        .minFilter = vk::Filter::eLinear,
        .mipmapMode = vk::SamplerMipmapMode::eLinear,
        .addressModeU = vk::SamplerAddressMode::eRepeat,
        .addressModeV = vk::SamplerAddressMode::eRepeat,
        .addressModeW = vk::SamplerAddressMode::eRepeat,
        .mipLodBias = 0.0f,
        .anisotropyEnable = true,
        .maxAnisotropy = properties.limits.maxSamplerAnisotropy,
        .compareEnable = false,
        .minLod = 0,
        .maxLod = VK_LOD_CLAMP_NONE,
        .borderColor = vk::BorderColor::eFloatOpaqueBlack,
        .unnormalizedCoordinates = false,
    };

    m_DefaultSampler = m_SamplerManager.Fetch(m_SamplerManager.Create(device, &m_DefaultSamplerCreateInfo));

    eastl::array poolSizes = {
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eStorageBuffer,
            .descriptorCount = buffersCount,
        },
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eCombinedImageSampler,
            .descriptorCount = texturesCount,
        },
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eStorageImage,
            .descriptorCount = storageTexturesCount,
        },
    };

    const vk::DescriptorPoolCreateInfo poolCreateInfo = {
        .flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
        .maxSets = 1,
        .poolSizeCount = Cast<u32>(poolSizes.size()),
        .pPoolSizes = poolSizes.data(),
    };
    AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));

    vk::DescriptorBindingFlags bindingFlags =
        vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
    eastl::array layoutBindingFlags = {
        bindingFlags,
        bindingFlags,
        bindingFlags,
    };

    vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
        .bindingCount = Cast<u32>(layoutBindingFlags.size()),
        .pBindingFlags = layoutBindingFlags.data(),
    };

    eastl::array descriptorLayoutBindings = {
        vk::DescriptorSetLayoutBinding{
            .binding = BUFFER_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eStorageBuffer,
            .descriptorCount = Cast<u32>(buffersCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
        vk::DescriptorSetLayoutBinding{
            .binding = TEXTURE_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eCombinedImageSampler,
            .descriptorCount = Cast<u32>(texturesCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
        vk::DescriptorSetLayoutBinding{
            .binding = STORAGE_TEXTURE_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eStorageImage,
            .descriptorCount = Cast<u32>(storageTexturesCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
    };
    static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
    const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
        .pNext = &bindingFlagsCreateInfo,
        .flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
        .bindingCount = Cast<u32>(descriptorLayoutBindings.size()),
        .pBindings = descriptorLayoutBindings.data(),
    };
    AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));

    // One descriptor set is enough. Updating it at any time is safe (update until submit; data is held while pending).
    // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
    // https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
    const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
        .descriptorPool = m_DescriptorPool,
        .descriptorSetCount = 1,
        .pSetLayouts = &m_SetLayout,
    };
    AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));

    m_Device->SetName(m_SetLayout, "Bindless Layout");
    m_Device->SetName(m_DescriptorPool, "Bindless Pool");
    m_Device->SetName(m_DescriptorSet, "Bindless Set");

    // NOTE: This needs to be synced with the destructor manually.
    // The Commit is kept outside the assert so it still runs when asserts are compiled out.
    [[maybe_unused]] const BufferHandle geometryHandle = Commit(m_Geometry.m_BackingBuffer.get());
    assert(geometryHandle.m_Index == UNIFIED_GEOMETRY_DATA_HANDLE_INDEX); // Making an assumption to avoid extra bindings.
}

RenderResourceManager::~RenderResourceManager()
{
    // NOTE: Matches the Commit of the unified geometry buffer in the constructor.
    Release(BufferHandle{0});

#if !defined(ASTER_NDEBUG)
    WARN_IF(m_CommitedBufferCount > 0 || m_CommitedTextureCount > 0 || m_CommitedStorageTextureCount > 0,
            "Resources alive: SSBO = {}, Textures = {}, RWTexture = {}", m_CommitedBufferCount, m_CommitedTextureCount,
            m_CommitedStorageTextureCount);
#endif

    m_Geometry.Destroy(m_Device);
    m_Index.Destroy(m_Device);
    m_Material.Destroy(m_Device);
    m_BufferManager.Destroy(m_Device);
    m_TextureManager.Destroy(m_Device);
    m_StorageTextureManager.Destroy(m_Device);
    m_SamplerManager.Destroy(m_Device);
    m_Device->m_Device.destroy(m_DescriptorPool, nullptr);
    m_Device->m_Device.destroy(m_SetLayout, nullptr);
}

RenderResourceManager::RenderResourceManager(RenderResourceManager &&other) noexcept
    : m_WriteInfos(std::move(other.m_WriteInfos))
    , m_Writes(std::move(other.m_Writes))
    , m_WriteOwner(std::move(other.m_WriteOwner))
    , m_Geometry(std::move(other.m_Geometry))
    , m_Index(std::move(other.m_Index))
    , m_Material(std::move(other.m_Material))
    , m_BufferManager(std::move(other.m_BufferManager))
    , m_TextureManager(std::move(other.m_TextureManager))
    , m_StorageTextureManager(std::move(other.m_StorageTextureManager))
    , m_SamplerManager(std::move(other.m_SamplerManager))
    , m_Device(Take(other.m_Device))
    , m_DescriptorPool(other.m_DescriptorPool)
    , m_SetLayout(other.m_SetLayout)
    , m_DescriptorSet(other.m_DescriptorSet)
    , m_UseBufferAddr(other.m_UseBufferAddr)
#if !defined(ASTER_NDEBUG)
    , m_CommitedBufferCount(other.m_CommitedBufferCount)
    , m_CommitedTextureCount(other.m_CommitedTextureCount)
    , m_CommitedStorageTextureCount(other.m_CommitedStorageTextureCount)
#endif
{
    assert(!other.m_Device);
}

RenderResourceManager &
RenderResourceManager::operator=(RenderResourceManager &&other) noexcept
{
    if (this == &other)
        return *this;
    m_WriteInfos = std::move(other.m_WriteInfos);
    m_Writes = std::move(other.m_Writes);
    m_WriteOwner = std::move(other.m_WriteOwner);
    m_Geometry = std::move(other.m_Geometry);
    m_Index = std::move(other.m_Index);
    m_Material = std::move(other.m_Material);
    m_BufferManager = std::move(other.m_BufferManager);
    m_TextureManager = std::move(other.m_TextureManager);
    m_StorageTextureManager = std::move(other.m_StorageTextureManager);
    m_SamplerManager = std::move(other.m_SamplerManager);
    m_Device = Take(other.m_Device); // Ensure taken.
    m_DescriptorPool = other.m_DescriptorPool;
    m_SetLayout = other.m_SetLayout;
    m_DescriptorSet = other.m_DescriptorSet;
    m_UseBufferAddr = other.m_UseBufferAddr;
#if !defined(ASTER_NDEBUG)
    m_CommitedBufferCount = other.m_CommitedBufferCount;
    m_CommitedTextureCount = other.m_CommitedTextureCount;
    m_CommitedStorageTextureCount = other.m_CommitedStorageTextureCount;
#endif

    assert(!other.m_Device);
    return *this;
}

SamplerHandle
RenderResourceManager::CreateSampler(const vk::SamplerCreateInfo *samplerCreateInfo)
{
    return m_SamplerManager.Create(m_Device, samplerCreateInfo);
}

GeometryHandle
RenderResourceManager::CreateGeometryBuffer(usize size, usize alignment, uptr *addr)
{
    GeometryHandle handle = {m_Geometry.Create(size, alignment).m_Index};

    if (addr)
    {
        *addr = FetchAddress(handle);
    }

    return handle;
}

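// FetchAddress() returns either a GPU virtual address (backing buffer device address + offset)
// when buffer device address is in use, or a plain byte offset into the unified buffer when it
// is not; callers are expected to interpret the value accordingly.
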
uptr
RenderResourceManager::FetchAddress(GeometryHandle handle)
{
    return (m_UseBufferAddr ? m_Geometry.m_BufferPtr : 0) + m_Geometry.FetchOffset(handle);
}

void
RenderResourceManager::Write(GeometryHandle handle, usize offset, usize size, const void *data)
{
    m_Geometry.Write(handle, offset, size, data);
}

void
RenderResourceManager::Release(GeometryHandle handle)
{
    if (handle.IsInvalid())
        return;

    m_Geometry.Release(handle);
}

MaterialHandle
RenderResourceManager::CreateMaterialBuffer(usize size, usize alignment, uptr *addr)
{
    MaterialHandle handle = {m_Material.Create(size, alignment).m_Index};

    if (addr)
    {
        *addr = FetchAddress(handle);
    }

    return handle;
}

usize
RenderResourceManager::FetchAddress(MaterialHandle handle)
{
    return (m_UseBufferAddr ? m_Material.m_BufferPtr : 0) + m_Material.FetchOffset(handle);
}

void
RenderResourceManager::Write(MaterialHandle handle, usize offset, usize size, const void *data)
{
    m_Material.Write(handle, offset, size, data);
}

void
RenderResourceManager::Release(MaterialHandle handle)
{
    if (handle.IsInvalid())
        return;

    m_Material.Release(handle);
}

IndexHandle
RenderResourceManager::CreateIndexBuffer(usize size, usize alignment, u32 *firstIndex)
{
    IndexHandle handle = {m_Index.Create(size, alignment).m_Index};

    if (firstIndex)
    {
        *firstIndex = FetchIndex(handle);
    }

    return handle;
}

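// FetchIndex() converts the byte offset of an index-buffer sub-allocation into a first-index
// value by dividing by sizeof(u32); the unified index buffer therefore assumes 32-bit indices.
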
u32
RenderResourceManager::FetchIndex(IndexHandle handle)
{
    return Cast<u32>(m_Index.FetchOffset(handle) / sizeof(u32));
}

void
RenderResourceManager::Write(IndexHandle handle, usize offset, usize size, const void *data)
{
    m_Index.Write(handle, offset, size, data);
}

void
RenderResourceManager::Release(IndexHandle handle)
{
    if (handle.IsInvalid())
        return;

    m_Index.Release(handle);
}

vk::Buffer
RenderResourceManager::GetIndexBuffer() const
{
    return m_Index.m_BackingBuffer->m_Buffer;
}