// =============================================
// Aster: gpu_resource_manager.cpp
// Copyright (c) 2020-2024 Anish Bhobe
// =============================================

#include "gpu_resource_manager.h"

#include "buffer.h"
#include "device.h"
#include "helpers.h"
#include "image.h"

#include <type_traits>

void
TextureManager::Init(const u32 maxCapacity) {
    m_MaxCapacity = maxCapacity;
    m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}

TextureHandle
TextureManager::Commit(Texture *texture) {
    ERROR_IF(!texture || !texture->IsValid(), "Texture must be valid for committal") THEN_ABORT(-1);

    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE) {
        const u32 index = m_FreeHead;
        Texture *allocatedTexture = &m_Textures[index];
        assert(!allocatedTexture->IsValid());
        // The free slot's leading bytes hold the index of the next free slot.
        m_FreeHead = *Recast<u32 *>(allocatedTexture);

        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<Texture>);
        *allocatedTexture = *texture;

        // Take ownership of the texture.
        texture->m_MipLevels_ &= ~Texture::OWNED_BIT;

        return {index};
    }

    const u32 index = Cast<u32>(m_Textures.size());
    if (index < m_MaxCapacity) {
        Texture *allocatedTexture = &m_Textures.push_back();

        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<Texture>);
        *allocatedTexture = *texture;

        // Take ownership of the texture.
        texture->m_MipLevels_ &= ~Texture::OWNED_BIT;

        return {index};
    }

    ERROR("Out of Textures") THEN_ABORT(-1);
}

Texture *
TextureManager::Fetch(const TextureHandle handle) {
    assert(!handle.IsInvalid());
    return &m_Textures[handle.m_Index];
}

void
TextureManager::Release(const Device *device, const TextureHandle handle) {
    assert(!handle.IsInvalid());
    Texture *allocatedTexture = &m_Textures[handle.m_Index];
    allocatedTexture->Destroy(device);
    assert(!allocatedTexture->IsValid());
    // Thread the slot onto the free list: store the old head in the dead slot.
    *Recast<u32 *>(allocatedTexture) = m_FreeHead;
    m_FreeHead = handle.m_Index;
}

void
TextureManager::Destroy(const Device *device) {
    for (auto &texture : m_Textures) {
        texture.Destroy(device);
    }
}
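// -----------------------------------------------------------------------------
// Illustrative sketch, not part of Aster's API: the managers in this file keep
// an intrusive free list by reinterpreting a destroyed slot's leading bytes as
// the index of the next free slot, so no side allocation is needed to track
// free entries. A minimal standalone version of that technique, using std
// containers and assuming a trivially copyable payload:
// -----------------------------------------------------------------------------
#include <cstdint>
#include <cstring>
#include <vector>

namespace sketch {

template <typename T>
class FreeListPool {
    static_assert(std::is_trivially_copyable_v<T>, "slots are reused as raw storage");
    static_assert(sizeof(T) >= sizeof(std::uint32_t), "slot must fit a free-list link");

    static constexpr std::uint32_t INVALID_INDEX = ~0u;

    std::vector<T> m_Slots;
    std::uint32_t m_FreeHead = INVALID_INDEX;

public:
    std::uint32_t Allocate(const T &value) {
        if (m_FreeHead != INVALID_INDEX) {
            const std::uint32_t index = m_FreeHead;
            // A freed slot's first bytes hold the next free index.
            std::memcpy(&m_FreeHead, &m_Slots[index], sizeof m_FreeHead);
            m_Slots[index] = value;
            return index;
        }
        // No free slot: grow the pool.
        m_Slots.push_back(value);
        return static_cast<std::uint32_t>(m_Slots.size() - 1);
    }

    void Release(const std::uint32_t index) {
        // Thread the slot onto the free list by storing the old head in it.
        std::memcpy(&m_Slots[index], &m_FreeHead, sizeof m_FreeHead);
        m_FreeHead = index;
    }

    T &operator[](const std::uint32_t index) { return m_Slots[index]; }
};

} // namespace sketch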
void
BufferManager::Init(const u32 maxCapacity) {
    m_MaxCapacity = maxCapacity;
    m_FreeHead = GpuResourceHandle::INVALID_HANDLE;
}

BufferHandle
BufferManager::Commit(StorageBuffer *buffer) {
    ERROR_IF(!buffer || !buffer->IsValid() || !buffer->IsOwned(), "Buffer must be valid and owned for committal") THEN_ABORT(-1);

    if (m_FreeHead != GpuResourceHandle::INVALID_HANDLE) {
        const u32 index = m_FreeHead;
        StorageBuffer *allocatedBuffer = &m_Buffers[index];
        assert(!allocatedBuffer->IsValid());
        m_FreeHead = *Recast<u32 *>(allocatedBuffer);

        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<StorageBuffer>);
        *allocatedBuffer = *buffer;

        // Take ownership of the buffer.
        buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;

        return {index};
    }

    const u32 index = Cast<u32>(m_Buffers.size());
    if (index < m_MaxCapacity) {
        StorageBuffer *allocatedBuffer = &m_Buffers.push_back();

        // Ensure it is copyable.
        static_assert(std::is_trivially_copyable_v<StorageBuffer>);
        *allocatedBuffer = *buffer;

        // Take ownership of the buffer.
        buffer->m_Size_ &= ~StorageBuffer::OWNED_BIT;

        return {index};
    }

    ERROR("Out of Buffers") THEN_ABORT(-1);
}

StorageBuffer *
BufferManager::Fetch(const BufferHandle handle) {
    assert(!handle.IsInvalid());
    return &m_Buffers[handle.m_Index];
}

void
BufferManager::Release(const Device *device, const BufferHandle handle) {
    assert(!handle.IsInvalid());
    StorageBuffer *allocatedBuffer = &m_Buffers[handle.m_Index];
    allocatedBuffer->Destroy(device);
    assert(!allocatedBuffer->IsValid());
    *Recast<u32 *>(allocatedBuffer) = m_FreeHead;
    m_FreeHead = handle.m_Index;
}

void
BufferManager::Destroy(const Device *device) {
    for (auto &buffer : m_Buffers) {
        buffer.Destroy(device);
    }
}

StorageTextureHandle
StorageTextureManager::Commit(StorageTexture *texture) {
    const TextureHandle tx = TextureManager::Commit(texture);
    return {tx.m_Index};
}

StorageTexture *
StorageTextureManager::Fetch(const StorageTextureHandle handle) {
    assert(!handle.IsInvalid());
    return Recast<StorageTexture *>(&m_Textures[handle.m_Index]);
}

void
StorageTextureManager::Release(const Device *device, const StorageTextureHandle handle) {
    TextureManager::Release(device, {handle.m_Index});
}

GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorBufferInfo info)
    : uBufferInfo(info) {
}

GpuResourceManager::WriteInfo::WriteInfo(vk::DescriptorImageInfo info)
    : uImageInfo(info) {
}

GpuResourceManager::WriteInfo::WriteInfo(vk::BufferView info)
    : uBufferView(info) {
}

BufferHandle
GpuResourceManager::Commit(StorageBuffer *storageBuffer) {
    const BufferHandle handle = m_BufferManager.Commit(storageBuffer);
    m_WriteInfos.emplace_back(vk::DescriptorBufferInfo{
        .buffer = storageBuffer->m_Buffer,
        .offset = 0,
        .range = storageBuffer->GetSize(),
    });
    // The write records a pointer into m_WriteInfos; it must stay valid until
    // Update() flushes the pending writes.
    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = BUFFER_BINDING_INDEX,
        .dstArrayElement = handle.m_Index,
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eStorageBuffer,
        .pBufferInfo = &m_WriteInfos.back().uBufferInfo,
    });
    m_WriteOwner.emplace_back(HandleType::eBuffer, handle.m_Index);

#if !defined(NDEBUG)
    ++m_CommitedBufferCount;
#endif

    return handle;
}

void
GpuResourceManager::Write(const BufferHandle handle, const usize offset, const usize size, const void *data) {
    m_BufferManager.Fetch(handle)->Write(m_Device, offset, size, data);
}

void
GpuResourceManager::EraseWrites(u32 handleIndex, HandleType handleType) {
    auto writeIter = m_Writes.begin();
    auto ownerIter = m_WriteOwner.begin();
    const auto ownerEnd = m_WriteOwner.end();
    while (ownerIter != ownerEnd) {
        if (ownerIter->first == handleType && ownerIter->second == handleIndex) {
            // Swap-remove: overwrite the match with the last entry and pop.
            // Each handle has at most one pending write, so stop at the first match.
            *writeIter = m_Writes.back();
            *ownerIter = m_WriteOwner.back();
            m_Writes.pop_back();
            m_WriteOwner.pop_back();
            return;
        }
        ++ownerIter;
        ++writeIter;
    }
}

void
GpuResourceManager::Release(BufferHandle handle) {
    if (handle.IsInvalid()) return;
    EraseWrites(handle.m_Index, HandleType::eBuffer);
    m_BufferManager.Release(m_Device, handle);

#if !defined(NDEBUG)
    --m_CommitedBufferCount;
#endif
}

void
GpuResourceManager::Release(StorageBuffer *storageBuffer, const BufferHandle handle) {
    assert(storageBuffer);
    assert(!storageBuffer->IsValid());
    StorageBuffer *internal = m_BufferManager.Fetch(handle);
    // Hand ownership back to the caller before freeing the slot.
    *storageBuffer = *internal;
    internal->m_Size_ &= ~StorageBuffer::OWNED_BIT;
    Release(handle);
}
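// -----------------------------------------------------------------------------
// Usage sketch (hypothetical caller code; `device` and `ssbo` are assumed to be
// a live Device and a valid, owned StorageBuffer): Commit() moves ownership
// into the manager and queues a descriptor write, Update() flushes the queued
// writes, and Release() drops any still-pending write and frees the slot.
//
//     GpuResourceManager resources{&device, 4096};
//     const BufferHandle handle = resources.Commit(&ssbo);
//     resources.Update();                                    // flush descriptor writes
//     resources.Write(handle, 0, sizeof(payload), &payload); // CPU-side upload
//     resources.Release(handle);                             // destroys the GPU buffer
// -----------------------------------------------------------------------------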
void
GpuResourceManager::Release(TextureHandle handle) {
    if (handle.IsInvalid()) return;
    EraseWrites(handle.m_Index, HandleType::eTexture);
    m_TextureManager.Release(m_Device, handle);

#if !defined(NDEBUG)
    --m_CommitedTextureCount;
#endif
}

void
GpuResourceManager::Release(Texture *texture, TextureHandle handle) {
    assert(texture);
    assert(!texture->IsValid());
    Texture *internal = m_TextureManager.Fetch(handle);
    *texture = *internal;
    internal->m_MipLevels_ &= ~Texture::OWNED_BIT;
    Release(handle);
}

TextureHandle
GpuResourceManager::CommitTexture(Texture *texture) {
    TextureHandle handle = m_TextureManager.Commit(texture);
    m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
        .sampler = nullptr,
        .imageView = texture->m_View,
        .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
    });
    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = TEXTURE_BINDING_INDEX,
        .dstArrayElement = handle.m_Index,
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eCombinedImageSampler,
        .pImageInfo = &m_WriteInfos.back().uImageInfo,
    });
    m_WriteOwner.emplace_back(HandleType::eTexture, handle.m_Index);

#if !defined(NDEBUG)
    ++m_CommitedTextureCount;
#endif

    return {handle};
}

StorageTextureHandle
GpuResourceManager::CommitStorageTexture(StorageTexture *storageTexture) {
    StorageTextureHandle handle = m_StorageTextureManager.Commit(storageTexture);
    m_WriteInfos.emplace_back(vk::DescriptorImageInfo{
        .sampler = nullptr,
        .imageView = storageTexture->m_View,
        .imageLayout = vk::ImageLayout::eGeneral,
    });
    m_Writes.push_back({
        .dstSet = m_DescriptorSet,
        .dstBinding = STORAGE_TEXTURE_BINDING_INDEX,
        .dstArrayElement = handle.m_Index,
        .descriptorCount = 1,
        .descriptorType = vk::DescriptorType::eStorageImage,
        .pImageInfo = &m_WriteInfos.back().uImageInfo,
    });
    m_WriteOwner.emplace_back(HandleType::eStorageTexture, handle.m_Index);

#if !defined(NDEBUG)
    ++m_CommitedStorageTextureCount;
#endif

    return {handle};
}

void
GpuResourceManager::Release(StorageTextureHandle handle) {
    if (handle.IsInvalid()) return;
    EraseWrites(handle.m_Index, HandleType::eStorageTexture);
    m_StorageTextureManager.Release(m_Device, handle);

#if !defined(NDEBUG)
    --m_CommitedStorageTextureCount;
#endif
}

void
GpuResourceManager::Release(StorageTexture *texture, const StorageTextureHandle handle) {
    assert(texture);
    assert(!texture->IsValid());
    StorageTexture *internal = m_StorageTextureManager.Fetch(handle);
    *texture = *internal;
    internal->m_MipLevels_ &= ~StorageTexture::OWNED_BIT;
    Release(handle);
}

void
GpuResourceManager::Update() {
    if (m_Writes.empty() || m_WriteInfos.empty()) return;
    m_Device->m_Device.updateDescriptorSets(Cast<u32>(m_Writes.size()), m_Writes.data(), 0, nullptr);
    m_Writes.clear();
    m_WriteInfos.clear();
    m_WriteOwner.clear();
}
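// -----------------------------------------------------------------------------
// Usage sketch (hypothetical `cmd` and `pipelineLayout`; the pipeline layout is
// assumed to have been created with m_SetLayout as set 0): since every resource
// lives in one bindless set, it is bound once and shaders index into it with
// the handle indices handed out by Commit/CommitTexture/CommitStorageTexture.
//
//     cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipelineLayout,
//                            0 /*firstSet*/, 1, &resources.m_DescriptorSet, 0, nullptr);
// -----------------------------------------------------------------------------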
GpuResourceManager::GpuResourceManager(Device *device, u16 maxSize)
    : m_Device(device) {
    vk::PhysicalDeviceProperties properties;
    m_Device->m_PhysicalDevice.getProperties(&properties);

    // Leave 1024 descriptors of headroom below the per-stage device limits.
    u32 buffersCount = eastl::min(properties.limits.maxPerStageDescriptorStorageBuffers - 1024, Cast<u32>(maxSize));
    u32 texturesCount = eastl::min(properties.limits.maxPerStageDescriptorSampledImages - 1024, Cast<u32>(maxSize));
    u32 storageTexturesCount = eastl::min(properties.limits.maxPerStageDescriptorStorageImages - 1024, Cast<u32>(maxSize));

    // TODO: Switch to bindless samplers / multiple sampler configurations
    vk::SamplerCreateInfo samplerCreateInfo = {
        .magFilter = vk::Filter::eLinear,
        .minFilter = vk::Filter::eLinear,
        .mipmapMode = vk::SamplerMipmapMode::eLinear,
        .addressModeU = vk::SamplerAddressMode::eRepeat,
        .addressModeV = vk::SamplerAddressMode::eRepeat,
        .addressModeW = vk::SamplerAddressMode::eRepeat,
        .mipLodBias = 0.0f,
        .anisotropyEnable = true,
        .maxAnisotropy = properties.limits.maxSamplerAnisotropy,
        .compareEnable = false,
        .minLod = 0,
        .maxLod = VK_LOD_CLAMP_NONE,
        .borderColor = vk::BorderColor::eFloatOpaqueBlack,
        .unnormalizedCoordinates = false,
    };
    AbortIfFailed(device->m_Device.createSampler(&samplerCreateInfo, nullptr, &m_ImmutableSampler));

    INFO("Max Buffer Count: {}", buffersCount);
    INFO("Max Texture Count: {}", texturesCount);
    INFO("Max Storage Texture Count: {}", storageTexturesCount);

    m_BufferManager.Init(buffersCount);
    m_TextureManager.Init(texturesCount);
    m_StorageTextureManager.Init(storageTexturesCount);

    eastl::array poolSizes = {
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eStorageBuffer,
            .descriptorCount = buffersCount,
        },
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eCombinedImageSampler,
            .descriptorCount = texturesCount,
        },
        vk::DescriptorPoolSize{
            .type = vk::DescriptorType::eStorageImage,
            .descriptorCount = storageTexturesCount,
        },
    };

    const vk::DescriptorPoolCreateInfo poolCreateInfo = {
        .flags = vk::DescriptorPoolCreateFlagBits::eUpdateAfterBind,
        .maxSets = 1,
        .poolSizeCount = Cast<u32>(poolSizes.size()),
        .pPoolSizes = poolSizes.data(),
    };
    AbortIfFailed(device->m_Device.createDescriptorPool(&poolCreateInfo, nullptr, &m_DescriptorPool));

    vk::DescriptorBindingFlags bindingFlags = vk::DescriptorBindingFlagBits::ePartiallyBound | vk::DescriptorBindingFlagBits::eUpdateAfterBind;
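    // ePartiallyBound lets the shader-visible arrays contain unwritten slots,
    // and eUpdateAfterBind lets Update() patch descriptors while the set stays
    // bound; together they make a single long-lived bindless set practical.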
    eastl::array layoutBindingFlags = {
        bindingFlags,
        bindingFlags,
        bindingFlags,
    };

    vk::DescriptorSetLayoutBindingFlagsCreateInfo bindingFlagsCreateInfo = {
        .bindingCount = Cast<u32>(layoutBindingFlags.size()),
        .pBindingFlags = layoutBindingFlags.data(),
    };

    eastl::vector<vk::Sampler> immutableSamplers(texturesCount, m_ImmutableSampler);

    eastl::array descriptorLayoutBindings = {
        vk::DescriptorSetLayoutBinding{
            .binding = BUFFER_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eStorageBuffer,
            .descriptorCount = Cast<u32>(buffersCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
        vk::DescriptorSetLayoutBinding{
            .binding = TEXTURE_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eCombinedImageSampler,
            .descriptorCount = Cast<u32>(texturesCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
            .pImmutableSamplers = immutableSamplers.data(),
        },
        vk::DescriptorSetLayoutBinding{
            .binding = STORAGE_TEXTURE_BINDING_INDEX,
            .descriptorType = vk::DescriptorType::eStorageImage,
            .descriptorCount = Cast<u32>(storageTexturesCount),
            .stageFlags = vk::ShaderStageFlagBits::eAll,
        },
    };
    static_assert(layoutBindingFlags.size() == descriptorLayoutBindings.size());

    const vk::DescriptorSetLayoutCreateInfo descriptorSetLayoutCreateInfo = {
        .pNext = &bindingFlagsCreateInfo,
        .flags = vk::DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool,
        .bindingCount = Cast<u32>(descriptorLayoutBindings.size()),
        .pBindings = descriptorLayoutBindings.data(),
    };
    AbortIfFailed(device->m_Device.createDescriptorSetLayout(&descriptorSetLayoutCreateInfo, nullptr, &m_SetLayout));

    // One descriptor set is enough. Updating it at any time is safe
    // (update until submit; data is held while the set is pending).
    // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VK_EXT_descriptor_indexing.html
    // https://github.com/KhronosGroup/Vulkan-Guide/blob/main/chapters/extensions/VK_EXT_descriptor_indexing.adoc
    const vk::DescriptorSetAllocateInfo descriptorSetAllocateInfo = {
        .descriptorPool = m_DescriptorPool,
        .descriptorSetCount = 1,
        .pSetLayouts = &m_SetLayout,
    };
    AbortIfFailed(device->m_Device.allocateDescriptorSets(&descriptorSetAllocateInfo, &m_DescriptorSet));

    m_Device->SetName(m_SetLayout, "Bindless Layout");
    m_Device->SetName(m_DescriptorPool, "Bindless Pool");
    m_Device->SetName(m_DescriptorSet, "Bindless Set");
}

GpuResourceManager::~GpuResourceManager() {
#if !defined(NDEBUG)
    WARN_IF(m_CommitedBufferCount > 0 || m_CommitedTextureCount > 0 || m_CommitedStorageTextureCount > 0,
        "Resources alive: SSBO = {}, Textures = {}, RWTexture = {}",
        m_CommitedBufferCount, m_CommitedTextureCount, m_CommitedStorageTextureCount);
#endif

    m_BufferManager.Destroy(m_Device);
    m_TextureManager.Destroy(m_Device);
    m_StorageTextureManager.Destroy(m_Device);

    m_Device->m_Device.destroy(m_ImmutableSampler, nullptr);
    m_Device->m_Device.destroy(m_DescriptorPool, nullptr);
    m_Device->m_Device.destroy(m_SetLayout, nullptr);
}

GpuResourceManager::GpuResourceManager(GpuResourceManager &&other) noexcept
    : m_WriteInfos(std::move(other.m_WriteInfos))
    , m_Writes(std::move(other.m_Writes))
    , m_WriteOwner(std::move(other.m_WriteOwner))
    , m_ImmutableSampler(other.m_ImmutableSampler)
    , m_BufferManager(std::move(other.m_BufferManager))
    , m_TextureManager(std::move(other.m_TextureManager))
    , m_StorageTextureManager(std::move(other.m_StorageTextureManager))
    , m_Device(Take(other.m_Device))
    , m_DescriptorPool(other.m_DescriptorPool)
    , m_SetLayout(other.m_SetLayout)
    , m_DescriptorSet(other.m_DescriptorSet)
    , m_CommitedBufferCount(other.m_CommitedBufferCount)
    , m_CommitedTextureCount(other.m_CommitedTextureCount)
    , m_CommitedStorageTextureCount(other.m_CommitedStorageTextureCount) {
    assert(!other.m_Device);
}

GpuResourceManager &
GpuResourceManager::operator=(GpuResourceManager &&other) noexcept {
    if (this == &other) return *this;
    m_WriteInfos = std::move(other.m_WriteInfos);
    m_Writes = std::move(other.m_Writes);
    m_WriteOwner = std::move(other.m_WriteOwner);
    m_ImmutableSampler = other.m_ImmutableSampler;
    m_BufferManager = std::move(other.m_BufferManager);
    m_TextureManager = std::move(other.m_TextureManager);
    m_StorageTextureManager = std::move(other.m_StorageTextureManager);
    m_Device = Take(other.m_Device); // Ensure taken.
    m_DescriptorPool = other.m_DescriptorPool;
    m_SetLayout = other.m_SetLayout;
    m_DescriptorSet = other.m_DescriptorSet;
    m_CommitedBufferCount = other.m_CommitedBufferCount;
    m_CommitedTextureCount = other.m_CommitedTextureCount;
    m_CommitedStorageTextureCount = other.m_CommitedStorageTextureCount;
    assert(!other.m_Device);
    return *this;
}
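// -----------------------------------------------------------------------------
// Behavioral note (illustrative, using the FreeListPool sketch above): released
// slots are recycled LIFO, so handle indices are reused immediately. Callers
// must treat a released handle as dead; these raw indices carry no generation
// counter, so stale-handle use cannot be detected after a slot is reused.
//
//     sketch::FreeListPool<std::uint64_t> pool;
//     const auto a = pool.Allocate(1); // index 0
//     const auto b = pool.Allocate(2); // index 1
//     pool.Release(a);                 // slot 0 becomes the free-list head
//     const auto c = pool.Allocate(3); // reuses index 0 immediately
// -----------------------------------------------------------------------------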