Blaze/Blaze/BufferManager.cpp

#include "BufferManager.h"
#include "GlobalMemory.h"
template struct RID<Buffer>;
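
// Destroys the Vulkan buffer and VMA allocation held by `buf`, bumps the
// slot's generation so stale handles fail isValidID(), and returns the slot
// to the free list.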
void BufferManager::destroyBuffer( Buffer& buf )
{
    if ( not buf.buffer ) return;
    ASSERT( m_pRenderDevice );

    uint32_t const index = buf.index;
    uint32_t const innerIndex = index & INDEX_MASK;
    uint32_t const generation = ( index & GENERATION_MASK ) >> GENERATION_OFFSET;

    RenderDevice const& renderDevice = *m_pRenderDevice;
    vmaDestroyBuffer( renderDevice.gpuAllocator, Take( buf.buffer ), Take( buf.allocation ) );

    buf.size = 0;
    buf.mappedData = nullptr;
    buf.index = innerIndex | ( ( generation + 1 ) << GENERATION_OFFSET );
    // NOTE: DO NOT EDIT INNER INDEX.
    ASSERT( innerIndex == ( buf.index & INDEX_MASK ) and "Index should not be modified" );
    ASSERT( buf.index > index and "Generation should increase." );

    m_freeList.pushBack( reinterpret_cast<FreeList::Node*>( &buf ) );
    --m_count;
}
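
// Returns the slot referenced by `rid` without validating the generation;
// callers are expected to check isValidID() first.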
Buffer& BufferManager::fetchBufferUnchecked( BufferID const& rid )
{
    uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
    uint32_t const innerIndex = index & INDEX_MASK;
    return m_aBuffers[innerIndex];
}
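
// Copies `size` bytes into the buffer's persistently mapped memory; the
// allocation is created host-visible and mapped, so no explicit map/unmap is
// needed here.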
void BufferManager::writeToBufferImpl( BufferID const& rid, void const* data, size_t const size )
{
    ASSERT( isValidID( rid ) );
    Buffer const& buffer = fetchBufferUnchecked( rid );
    ASSERT( size <= buffer.size );
    memcpy( buffer.mappedData, data, size );
}
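
// A handle is valid when its packed slot index is in range and its generation
// matches the generation currently stored in that slot.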
bool BufferManager::isValidID( BufferID const& rid ) const
{
    uint32_t const index = *reinterpret_cast<uint32_t const*>( &rid );
    uint32_t const innerIndex = index & INDEX_MASK;
    // Valid slots are [0, m_capacity).
    if ( innerIndex >= m_capacity ) return false;
    return m_aBuffers[innerIndex].index == index;
}
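
// Creates a host-visible, persistently mapped vertex buffer. Returns
// std::nullopt when no free slot is available; creation errors are routed
// through VK_CHECK.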
std::optional<BufferID> BufferManager::createVertexBuffer( size_t const size )
{
    if ( m_freeList.empty() )
    {
        return std::nullopt;
    }
    Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
    ++m_count;

    ASSERT( m_pRenderDevice );
    RenderDevice const& renderDevice = *m_pRenderDevice;

    VkBufferCreateInfo const bufferCreateInfo = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .size = size,
        .usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = nullptr,
    };
    VmaAllocationCreateInfo constexpr allocationCreateInfo = {
        .flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
        .usage = VMA_MEMORY_USAGE_AUTO,
        .requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
        .preferredFlags = 0,
        .memoryTypeBits = 0,
        .pool = nullptr,
        .pUserData = nullptr,
        .priority = 1.0f,
    };

    VmaAllocationInfo allocationInfo;
    VkBuffer vertexBuffer;
    VmaAllocation vertexBufferAllocation;
    VK_CHECK( vmaCreateBuffer(
        renderDevice.gpuAllocator,
        &bufferCreateInfo,
        &allocationCreateInfo,
        &vertexBuffer,
        &vertexBufferAllocation,
        &allocationInfo ) );

    // NOTE: bufferSlot preserves index between uses.
    uint32_t index = bufferSlot->index;
    new ( bufferSlot ) Buffer{
        .buffer = vertexBuffer,
        .allocation = vertexBufferAllocation,
        .mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
        .deviceAddress = 0,
        .size = size,
        .index = index,
    };
    // NOTE: Memory hackery to create BufferID.
    return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
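
// Same as createVertexBuffer(), but with VK_BUFFER_USAGE_INDEX_BUFFER_BIT.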
std::optional<BufferID> BufferManager::createIndexBuffer( size_t size )
{
    if ( m_freeList.empty() )
    {
        return std::nullopt;
    }
    Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
    ++m_count;

    ASSERT( m_pRenderDevice );
    RenderDevice const& renderDevice = *m_pRenderDevice;

    VkBufferCreateInfo const bufferCreateInfo = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .size = size,
        .usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = nullptr,
    };
    VmaAllocationCreateInfo constexpr allocationCreateInfo = {
        .flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
        .usage = VMA_MEMORY_USAGE_AUTO,
        .requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
        .preferredFlags = 0,
        .memoryTypeBits = 0,
        .pool = nullptr,
        .pUserData = nullptr,
        .priority = 1.0f,
    };

    VmaAllocationInfo allocationInfo;
    VkBuffer indexBuffer;
    VmaAllocation indexBufferAllocation;
    VK_CHECK( vmaCreateBuffer(
        renderDevice.gpuAllocator,
        &bufferCreateInfo,
        &allocationCreateInfo,
        &indexBuffer,
        &indexBufferAllocation,
        &allocationInfo ) );

    // NOTE: bufferSlot preserves index between uses.
    uint32_t index = bufferSlot->index;
    new ( bufferSlot ) Buffer{
        .buffer = indexBuffer,
        .allocation = indexBufferAllocation,
        .mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
        .deviceAddress = 0,
        .size = size,
        .index = index,
    };
    // NOTE: Memory hackery to create BufferID.
    return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
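
// Creates a host-visible storage buffer with device-address support and
// records its VkDeviceAddress so it can be fetched via fetchDeviceAddress().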
std::optional<BufferID> BufferManager::createStorageBuffer( size_t size )
{
    if ( m_freeList.empty() )
    {
        return std::nullopt;
    }
    Buffer* bufferSlot = reinterpret_cast<Buffer*>( m_freeList.popFront() );
    ++m_count;

    ASSERT( m_pRenderDevice );
    RenderDevice const& renderDevice = *m_pRenderDevice;

    VkBufferCreateInfo const bufferCreateInfo = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .size = size,
        .usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = nullptr,
    };
    VmaAllocationCreateInfo constexpr allocationCreateInfo = {
        .flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
        .usage = VMA_MEMORY_USAGE_AUTO,
        .requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
        .preferredFlags = 0,
        .memoryTypeBits = 0,
        .pool = nullptr,
        .pUserData = nullptr,
        .priority = 1.0f,
    };

    VmaAllocationInfo allocationInfo;
    VkBuffer storageBuffer;
    VmaAllocation storageBufferAllocation;
    VK_CHECK( vmaCreateBuffer(
        renderDevice.gpuAllocator,
        &bufferCreateInfo,
        &allocationCreateInfo,
        &storageBuffer,
        &storageBufferAllocation,
        &allocationInfo ) );

    VkBufferDeviceAddressInfo const deviceAddressInfo = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
        .pNext = nullptr,
        .buffer = storageBuffer,
    };
    VkDeviceAddress const deviceAddress = vkGetBufferDeviceAddress( renderDevice.device, &deviceAddressInfo );

    // NOTE: bufferSlot preserves index between uses.
    uint32_t index = bufferSlot->index;
    new ( bufferSlot ) Buffer{
        .buffer = storageBuffer,
        .allocation = storageBufferAllocation,
        .mappedData = static_cast<std::byte*>( allocationInfo.pMappedData ),
        .deviceAddress = deviceAddress,
        .size = size,
        .index = index,
    };
    // NOTE: Memory hackery to create BufferID.
    return std::move( *reinterpret_cast<BufferID*>( &index ) );
}
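
// Destroys the buffer behind `rid` (if the handle is still valid) and consumes
// the moved-in handle so it cannot be reused by the caller.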
void BufferManager::freeBuffer( BufferID&& rid )
{
    if ( not isValidID( rid ) ) return;
    Buffer& buffer = fetchBufferUnchecked( rid );
    destroyBuffer( buffer );
    auto _ = std::move( rid );
}
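
// Returns the VkBuffer for a valid handle, or std::nullopt for a stale one.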
std::optional<VkBuffer> BufferManager::fetchBuffer( BufferID const& rid )
{
    if ( not isValidID( rid ) ) return std::nullopt;
    return fetchBufferUnchecked( rid ).buffer;
}
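
// Returns the device address for a valid handle; std::nullopt for stale
// handles or buffers created without device-address usage.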
std::optional<VkDeviceAddress> BufferManager::fetchDeviceAddress( BufferID const& rid )
{
    if ( not isValidID( rid ) ) return std::nullopt;
    Buffer const& buffer = fetchBufferUnchecked( rid );
    if ( buffer.deviceAddress == 0 ) return std::nullopt;
    return buffer.deviceAddress;
}
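
// Adopts an externally allocated slot array, stamps every slot with
// generation 1, and threads all slots onto the free list.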
BufferManager::BufferManager( RenderDevice* pRenderDevice, Buffer* aBuffers, uint32_t const capacity )
    : m_pRenderDevice{ pRenderDevice }, m_aBuffers{ aBuffers }, m_count{ 0 }, m_capacity{ capacity }
{
    uint32_t i = 0;
    for ( Buffer& buf : std::span{ m_aBuffers, m_capacity } )
    {
        // Default generation is 1.
        buf.index = i++ | ( 1 << GENERATION_OFFSET );
        m_freeList.pushFront( reinterpret_cast<FreeList::Node*>( &buf ) );
    }
}
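
// Tears down the manager: releases any buffers that were never freed, zeroes
// the slot array, and drops the reference to it.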
void BufferManager::destroy()
{
#if defined( _DEBUG )
    if ( m_count > 0 )
    {
        SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%u buffers still allocated.", m_count );
    }
#endif
    // Release any buffers that were never freed. destroyBuffer() is a no-op for
    // slots whose VkBuffer is already null, so the whole array can be walked.
    for ( Buffer& buf : std::span{ m_aBuffers, m_capacity } )
    {
        destroyBuffer( buf );
    }
    while ( not m_freeList.empty() )
    {
        Buffer* buf = reinterpret_cast<Buffer*>( m_freeList.popFront() );
        memset( buf, 0, sizeof *buf );
    }
    m_aBuffers = nullptr; // ~BufferManager asserts the slot array has been released.
}

BufferManager::~BufferManager()
{
    ASSERT( not m_aBuffers );
}
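
// Allocates storage for the slot array and the manager itself from GlobalMemory
// and constructs the manager in place. Returns nullptr if either allocation fails.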
BufferManager* BufferManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t maxCount )
{
    Buffer* buffers = reinterpret_cast<Buffer*>( mem->allocate( maxCount * sizeof( Buffer ), alignof( Buffer ) ) );
    if ( not buffers ) return nullptr;
    std::byte* allocation = mem->allocate( sizeof( BufferManager ), alignof( BufferManager ) );
    if ( not allocation ) return nullptr;
    return new ( allocation ) BufferManager{ renderDevice, buffers, maxCount };
}