// =============================================
// Aster: gpu_resource_manager.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================

#include "gpu_resource_manager.h"
|
|
|
|
#include "buffer.h"
|
|
#include "device.h"
|
|
#include "helpers.h"
|
|
#include "image.h"
|
|
|
|
#include <EASTL/array.h>
|
|
|
|
void
TextureManager::Init(const u32 maxCapacity)
{
	m_MaxCapacity = maxCapacity;
	m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}

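// Free slots form an intrusive singly linked list: Release() destroys a texture in
// place, stores the previous m_FreeHead in the first four bytes of the dead slot,
// and points m_FreeHead at that slot. Commit() pops from this list before growing
// the array, which is why a dead slot is reinterpreted as a u32 below.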
TextureHandle
TextureManager::Commit(Texture *texture)
{
	ERROR_IF(!texture->IsValid() || !texture->IsOwned(), "Texture must be valid and owned for committal")
	THEN_ABORT(-1);

	if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
	{
		const u32 index = m_FreeHead;

		Texture *allocatedTexture = &m_Textures[index];

		assert(!allocatedTexture->IsValid());
		m_FreeHead = *Recast<u32 *>(allocatedTexture);

		// Ensure it is copyable.
		static_assert(std::is_trivially_copyable_v<Texture>);
		*allocatedTexture = *texture;

		// Take ownership of the texture.
		texture->m_MipLevels_ &= ~Texture::OWNED_BIT;

		return {index};
	}

	const u32 index = Cast<u32>(m_Textures.size());
	if (index < m_MaxCapacity)
	{
		Texture *allocatedTexture = &m_Textures.push_back();

		// Ensure it is copyable.
		static_assert(std::is_trivially_copyable_v<Texture>);
		*allocatedTexture = *texture;

		texture->m_MipLevels_ &= ~Texture::OWNED_BIT;

		return {index};
	}

	ERROR("Out of Textures") THEN_ABORT(-1);
}

Texture *
TextureManager::Fetch(const TextureHandle handle)
{
	assert(!handle.IsInvalid());

	return &m_Textures[handle.m_Index];
}

void
TextureManager::Release(const Device *device, const TextureHandle handle)
{
	assert(!handle.IsInvalid());

	Texture *allocatedTexture = &m_Textures[handle.m_Index];
	allocatedTexture->Destroy(device);

	assert(!allocatedTexture->IsValid());
	*Recast<u32 *>(allocatedTexture) = m_FreeHead;

	m_FreeHead = handle.m_Index;
}

void
TextureManager::Destroy(const Device *device)
{
	for (auto &texture : m_Textures)
	{
		texture.Destroy(device);
	}
}

void
BufferManager::Init(const u32 maxCapacity)
{
	m_MaxCapacity = maxCapacity;
	m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}

BufferHandle
BufferManager::Commit(StorageBuffer *buffer)
{
	ERROR_IF(!buffer->IsValid() || !buffer->IsOwned(), "Buffer must be valid and owned for committal") THEN_ABORT(-1);

	if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE)
	{
		const u32 index = m_FreeHead;

		StorageBuffer *allocatedBuffer = &m_Buffers[index];

		assert(!allocatedBuffer->IsValid());
		m_FreeHead = *Recast<u32 *>(allocatedBuffer);

		// Ensure it is copyable.
		static_assert(std::is_trivially_copyable_v<StorageBuffer>);
		*allocatedBuffer = *buffer;

		// Take ownership of the buffer.
		buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;

		return {index};
	}

	const u32 index = Cast<u32>(m_Buffers.size());
	if (index < m_MaxCapacity)
	{
		StorageBuffer *allocatedBuffer = &m_Buffers.push_back();

		// Ensure it is copyable.
		static_assert(std::is_trivially_copyable_v<StorageBuffer>);
		*allocatedBuffer = *buffer;

		buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;

		return {index};
	}

	ERROR("Out of Buffers") THEN_ABORT(-1);
}

StorageBuffer *
BufferManager::Fetch(const BufferHandle handle)
{
	assert(!handle.IsInvalid());

	return &m_Buffers[handle.m_Index];
}

void
BufferManager::Release(const Device *device, const BufferHandle handle)
{
	assert(!handle.IsInvalid());

	StorageBuffer *allocatedBuffer = &m_Buffers[handle.m_Index];
	allocatedBuffer->Destroy(device);

	assert(!allocatedBuffer->IsValid());
	*Recast<u32 *>(allocatedBuffer) = m_FreeHead;

	m_FreeHead = handle.m_Index;
}

void
BufferManager::Destroy(const Device *device)
{
	for (auto &buffer : m_Buffers)
	{
		buffer.Destroy(device);
	}
}

GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info)
	: uBufferInfo(info)
{
}

GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info)
	: uImageInfo(info)
{
}

GpuResourceManager::WriteInfo::WriteInfo(vk::BufferView info)
	: uBufferView(info)
{
}

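// Descriptor writes are deferred: each Commit() queues a vk::WriteDescriptorSet
// (plus its backing WriteInfo) and Update() flushes the whole batch with a single
// updateDescriptorSets call. Each queued write stores a pointer into m_WriteInfos,
// so that container is assumed not to invalidate pointers between Commit() and
// Update() (e.g. via reserved capacity or a deque-like layout in the header).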
BufferHandle
GpuResourceManager::Commit(StorageBuffer *storageBuffer)
{
	const BufferHandle handle = m_BufferManager.Commit(storageBuffer);

	m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
		.buffer = storageBuffer->m_Buffer,
		.offset = 0,
		.range = storageBuffer->GetSize(),
	});

	m_Writes.push_back({
		.dstSet = m_DescriptorSet,
		.dstBinding = BUFFER_BINDING_INDEX,
		.dstArrayElement = handle.m_Index,
		.descriptorCount = 1,
		.descriptorType = vk::DescriptorType::eStorageBuffer,
		.pBufferInfo = &m_WriteInfos.back().uBufferInfo,
	});

	m_WriteOwner.emplace_back(HandleType::eBuffer, handle.m_Index);

#if !defined(NDEBUG)
	++m_CommitedBufferCount;
#endif

	return handle;
}

void
GpuResourceManager::Write(const BufferHandle handle, const usize offset, const usize size, const void *data)
{
	m_BufferManager.Fetch(handle)->Write(m_Device, offset, size, data);
}

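// Drops the pending descriptor write owned by a handle that is about to be
// released, using swap-with-last removal to keep both arrays dense. A live handle
// owns at most one pending write, so the scan stops at the first match.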
void
GpuResourceManager::EraseWrites(u32 handleIndex, HandleType handleType)
{
	auto writeIter = m_Writes.begin();
	auto ownerIter = m_WriteOwner.begin();
	const auto ownerEnd = m_WriteOwner.end();

	while (ownerIter != ownerEnd)
	{
		if (ownerIter->first == handleType && ownerIter->second == handleIndex)
		{
			*writeIter = m_Writes.back();
			*ownerIter = m_WriteOwner.back();
			m_Writes.pop_back();
			m_WriteOwner.pop_back();
			return;
		}

		++ownerIter;
		++writeIter;
	}
}

void
GpuResourceManager::Release(BufferHandle handle)
{
	if (handle.IsInvalid())
		return;

	EraseWrites(handle.m_Index, HandleType::eBuffer);

	m_BufferManager.Release(m_Device, handle);

#if !defined(NDEBUG)
	--m_CommitedBufferCount;
#endif
}

void
GpuResourceManager::Release(TextureHandle handle)
{
	if (handle.IsInvalid())
		return;

	EraseWrites(handle.m_Index, HandleType::eTexture);

	m_TextureManager.Release(m_Device, handle);

#if !defined(NDEBUG)
	--m_CommitedTextureCount;
#endif
}

TextureHandle
GpuResourceManager::Commit(Texture *texture)
{
	TextureHandle handle = m_TextureManager.Commit(texture);

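	// The sampler member can stay null: the set layout bakes immutable samplers
	// into the texture binding (see the constructor below).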
	m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
		.sampler = nullptr,
		.imageView = texture->m_View,
		.imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
	});

	m_Writes.push_back({
		.dstSet = m_DescriptorSet,
		.dstBinding = TEXTURE_BINDING_INDEX,
		.dstArrayElement = handle.m_Index,
		.descriptorCount = 1,
		.descriptorType = vk::DescriptorType::eCombinedImageSampler,
		.pImageInfo = &m_WriteInfos.back().uImageInfo,
	});

	m_WriteOwner.emplace_back(HandleType::eTexture, handle.m_Index);

#if !defined(NDEBUG)
	++m_CommitedTextureCount;
#endif

	return handle;
}

void
GpuResourceManager::Update()
{
	if (m_Writes.empty() || m_WriteInfos.empty())
		return;

	m_Device->m_Device.updateDescriptorSets(Cast<u32>(m_Writes.size()), m_Writes.data(), 0, nullptr);

	m_Writes.clear();
	m_WriteInfos.clear();
	m_WriteOwner.clear();
}

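// A minimal usage sketch (hypothetical: `device`, `data`, and the StorageBuffer
// setup are stand-ins, not part of this file):
//
//     GpuResourceManager resources(device, 4096);
//
//     StorageBuffer ssbo = /* created valid and owned elsewhere */;
//     const BufferHandle handle = resources.Commit(&ssbo); // queues a descriptor write
//
//     resources.Write(handle, 0, sizeof(data), &data); // upload through the manager
//     resources.Update();                              // flush queued writes once per frame
//
//     resources.Release(handle); // destroys the buffer and recycles its slot
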
GpuResourceManager::GpuResourceManager(const Device *device, u16 maxSize)
	: m_Device(device)
{
	vk::PhysicalDeviceProperties properties;
	m_Device->m_PhysicalDevice.getProperties(&properties);

	u32 buffersCount = eastl::min(properties.limits.maxPerStageDescriptorStorageBuffers - 1024, Cast<u32>(maxSize));
	u32 texturesCount = eastl::min(properties.limits.maxPerStageDescriptorSampledImages - 1024, Cast<u32>(maxSize));
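	// Note: the 1024 subtracted above leaves descriptor headroom below the device
	// limits; this assumes limits comfortably above 1024.
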
	// TODO: Switch to bindless samplers / multiple sampler configurations
	vk::SamplerCreateInfo samplerCreateInfo = {
		.magFilter = vk::Filter::eLinear,
		.minFilter = vk::Filter::eLinear,
		.mipmapMode = vk::SamplerMipmapMode::eLinear,
		.addressModeU = vk::SamplerAddressMode::eRepeat,
		.addressModeV = vk::SamplerAddressMode::eRepeat,
		.addressModeW = vk::SamplerAddressMode::eRepeat,
		.mipLodBias = 0.0f,
		.anisotropyEnable = true,
		.maxAnisotropy = properties.limits.maxSamplerAnisotropy,
		.compareEnable = false,
		.minLod = 0,
		.maxLod = VK_LOD_CLAMP_NONE,
		.borderColor = vk::BorderColor::eFloatOpaqueBlack,
		.unnormalizedCoordinates = false,
	};
	AbortIfFailed(device->m_Device.createSampler(&samplerCreateInfo, nullptr, &m_ImmutableSampler));

INFO("Max Buffer Count: {}", buffersCount);
|
|
INFO("Max Texture Count: {}", texturesCount);
|
|
|
|
m_BufferManager.Init(buffersCount);
|
|
m_TextureManager.Init(texturesCount);
|
|
|
|
eastl::array poolSizes = {
|
|
vk::DescriptorPoolSize{
|
|
.type = vk::DescriptorType::eStorageBuffer,
|
|
.descriptorCount = buffersCount,
|
|
},
|
|
vk::DescriptorPoolSize{
|
|
.type = vk::DescriptorType::eCombinedImageSampler,
|
|
.descriptorCount = texturesCount,
|
|
},
|
|
};
|
|
const vk::DescriptorPoolCreateInfo poolCreateInfo = {
|
|
.flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
|
|
.maxSets = 1,
|
|
.poolSizeCount = Cast<u32>(poolSizes.size()),
|
|
.pPoolSizes = poolSizes.data(),
|
|
};
|
|
AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));
|
|
|
|
vk::DescriptorBindingFlags bindingFlags =
|
|
vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
|
|
eastl::array layoutBindingFlags = {
|
|
bindingFlags,
|
|
bindingFlags,
|
|
};
|
|
|
|
vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
|
|
.bindingCount = Cast<u32>(layoutBindingFlags.size()),
|
|
.pBindingFlags = layoutBindingFlags.data(),
|
|
};
|
|
|
|
eastl::vector immutableSamplers(texturesCount, m_ImmutableSampler);
|
|
eastl::array descriptorLayoutBindings = {
|
|
vk::DescriptorSetLayoutBinding{
|
|
.binding = BUFFER_BINDING_INDEX,
|
|
.descriptorType = vk::DescriptorType::eStorageBuffer,
|
|
.descriptorCount = Cast<u32>(buffersCount),
|
|
.stageFlags = vk::ShaderStageFlagBits::eAll,
|
|
},
|
|
vk::DescriptorSetLayoutBinding{
|
|
.binding = TEXTURE_BINDING_INDEX,
|
|
.descriptorType = vk::DescriptorType::eCombinedImageSampler,
|
|
.descriptorCount = Cast<u32>(texturesCount),
|
|
.stageFlags = vk::ShaderStageFlagBits::eAll,
|
|
.pImmutableSamplers = immutableSamplers.data(),
|
|
},
|
|
};
|
|
static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());
|
|
const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
|
|
.pNext = &bindingFlagsCreateInfo,
|
|
.flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
|
|
.bindingCount = Cast<u32>(descriptorLayoutBindings.size()),
|
|
.pBindings = descriptorLayoutBindings.data(),
|
|
};
|
|
AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));
|
|
|
|
// One descriptor is enough. Updating it at any time is safe. (Update until submit, data held when pending)
|
|
// https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
|
|
// https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
|
|
const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
|
|
.descriptorPool = m_DescriptorPool,
|
|
.descriptorSetCount = 1,
|
|
.pSetLayouts = &m_SetLayout,
|
|
};
|
|
AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));
|
|
|
|
m_Device->SetName(m_SetLayout, "Bindless Layout");
|
|
m_Device->SetName(m_DescriptorPool, "Bindless Pool");
|
|
m_Device->SetName(m_DescriptorSet, "Bindless Set");
|
|
}
|
|
|
|
GpuResourceManager::~GpuResourceManager()
{
	// Moved-from instances have a null device and own nothing.
	if (!m_Device)
		return;

#if !defined(NDEBUG)
	WARN_IF(m_CommitedBufferCount > 0 || m_CommitedTextureCount > 0, "Resources alive: SSBO = {}, Textures = {}",
			m_CommitedBufferCount, m_CommitedTextureCount);
#endif

	m_BufferManager.Destroy(m_Device);
	m_TextureManager.Destroy(m_Device);
	m_Device->m_Device.destroy(m_ImmutableSampler, nullptr);
	m_Device->m_Device.destroy(m_DescriptorPool, nullptr);
	m_Device->m_Device.destroy(m_SetLayout, nullptr);
}

GpuResourceManager::GpuResourceManager(GpuResourceManager &&other) noexcept
	: m_WriteInfos(std::move(other.m_WriteInfos))
	, m_Writes(std::move(other.m_Writes))
	, m_WriteOwner(std::move(other.m_WriteOwner))
	, m_ImmutableSampler(other.m_ImmutableSampler)
	, m_BufferManager(std::move(other.m_BufferManager))
	, m_TextureManager(std::move(other.m_TextureManager))
	, m_Device(Take(other.m_Device))
	, m_DescriptorPool(other.m_DescriptorPool)
	, m_SetLayout(other.m_SetLayout)
	, m_DescriptorSet(other.m_DescriptorSet)
	, m_CommitedBufferCount(other.m_CommitedBufferCount)
	, m_CommitedTextureCount(other.m_CommitedTextureCount)
{
	assert(!other.m_Device);
}

GpuResourceManager &
GpuResourceManager::operator=(GpuResourceManager &&other) noexcept
{
	if (this == &other)
		return *this;
	m_WriteInfos = std::move(other.m_WriteInfos);
	m_Writes = std::move(other.m_Writes);
	m_WriteOwner = std::move(other.m_WriteOwner);
	m_ImmutableSampler = other.m_ImmutableSampler;
	m_BufferManager = std::move(other.m_BufferManager);
	m_TextureManager = std::move(other.m_TextureManager);
	m_Device = Take(other.m_Device); // Ensure taken.
	m_DescriptorPool = other.m_DescriptorPool;
	m_SetLayout = other.m_SetLayout;
	m_DescriptorSet = other.m_DescriptorSet;
	m_CommitedBufferCount = other.m_CommitedBufferCount;
	m_CommitedTextureCount = other.m_CommitedTextureCount;

	assert(!other.m_Device);
	return *this;
}