Blaze/Blaze/EntityManager.cpp

510 lines
21 KiB
C++

#include "EntityManager.h"
#include <array>
#include "GlobalMemory.h"
#include "RenderDevice.h"
#include <stb_image.h>
#include "Frame.h"
#include "TextureManager.h"
// Creates a fully-initialized entity: uploads the vertex data into a
// host-visible GPU buffer, loads `textureFile` from disk via stb_image,
// uploads it into a sampled image, and generates the full mip chain on the
// GPU with a chain of blits.
//
// Returns a pointer to the newly created entity, or nullptr if the texture
// could not be loaded/created (all partially-created resources are released
// on that path).
//
// NOTE(review): this blocks the CPU on a fence until the upload completes,
// so it is load-time machinery, not suitable for per-frame streaming.
Entity* EntityManager::createEntity(
Transform const& transform, std::span<Vertex> const vertices, const char* textureFile )
{
ASSERT( pRenderDevice );
// Fixed: previously a full manager would write past the end of `entities`.
ASSERT( count < capacity );
RenderDevice& renderDevice = *pRenderDevice;

// ---- Vertex buffer: host-visible, persistently mapped, filled directly ----
Mesh mesh;
{
mesh.vertexCount = static_cast<uint32_t>( vertices.size() );
mesh.vertexBufferSize = static_cast<uint32_t>( vertices.size_bytes() );
VkBufferCreateInfo const bufferCreateInfo = {
    .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
    .pNext = nullptr,
    .flags = 0,
    .size = mesh.vertexBufferSize,
    .usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
    .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    .queueFamilyIndexCount = 0,
    .pQueueFamilyIndices = nullptr,
};
VmaAllocationCreateInfo constexpr allocationCreateInfo = {
    .flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
    .usage = VMA_MEMORY_USAGE_AUTO,
    .requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
    .preferredFlags = 0,
    .memoryTypeBits = 0,
    .pool = nullptr,
    .pUserData = nullptr,
    .priority = 1.0f,
};
VmaAllocationInfo allocationInfo;
VK_CHECK( vmaCreateBuffer(
    pRenderDevice->gpuAllocator,
    &bufferCreateInfo,
    &allocationCreateInfo,
    &mesh.vertexBuffer,
    &mesh.vertexBufferAllocation,
    &allocationInfo ) );
// With MAPPED_BIT + HOST_VISIBLE the allocation should always come back
// mapped; the guard is defensive (a null mapping would silently skip the
// upload — NOTE(review): consider asserting instead).
if ( allocationInfo.pMappedData )
{
    memcpy( allocationInfo.pMappedData, vertices.data(), vertices.size_bytes() );
}
}

// ---- Material: sampler + texture image, uploaded via a staging buffer ----
Material material;
{
VkSampler sampler;
uint32_t width;
uint32_t height;
uint32_t numChannels = 4; // always request RGBA8 from stb_image
stbi_uc* textureData;
{
    int w;
    int h;
    int nc;
    int const requestedChannels = static_cast<int>( numChannels );
    textureData = stbi_load( textureFile, &w, &h, &nc, requestedChannels );
    if ( not textureData )
    {
        // Roll back the vertex buffer before bailing out.
        vmaDestroyBuffer( pRenderDevice->gpuAllocator, Take( mesh.vertexBuffer ), Take( mesh.vertexBufferAllocation ) );
        SDL_LogError( SDL_LOG_CATEGORY_ERROR, "%s", stbi_failure_reason() );
        return nullptr;
    }
    // Fixed: this assert ran before the null check, reading `nc` which stb
    // leaves unwritten on failure.
    ASSERT( nc <= requestedChannels );
    width = static_cast<uint32_t>( w );
    height = static_cast<uint32_t>( h );
}
// NOTE(review): anisotropyEnable requires the samplerAnisotropy device
// feature, and maxAnisotropy = 1.0 makes it effectively a no-op — confirm
// the feature is enabled or set anisotropyEnable = false.
VkSamplerCreateInfo constexpr samplerCreateInfo = {
    .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
    .pNext = nullptr,
    .flags = 0,
    .magFilter = VK_FILTER_LINEAR,
    .minFilter = VK_FILTER_LINEAR,
    .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
    .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
    .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
    .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
    .mipLodBias = 0.0,
    .anisotropyEnable = true,
    .maxAnisotropy = 1.0f,
    .compareEnable = false,
    .compareOp = VK_COMPARE_OP_NEVER,
    .minLod = 0.0f,
    .maxLod = VK_LOD_CLAMP_NONE,
    .borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK,
    .unnormalizedCoordinates = false,
};
VK_CHECK( vkCreateSampler( renderDevice.device, &samplerCreateInfo, nullptr, &sampler ) );
auto textureOpt = renderDevice.textureManager->createTexture( { width, height, 1 }, sampler );
if ( not textureOpt )
{
    // Fixed: the sampler used to leak on this path, and the log printed a
    // stale stbi error even though image loading had succeeded.
    vkDestroySampler( renderDevice.device, sampler, nullptr );
    vmaDestroyBuffer( pRenderDevice->gpuAllocator, Take( mesh.vertexBuffer ), Take( mesh.vertexBufferAllocation ) );
    SDL_LogError( SDL_LOG_CATEGORY_ERROR, "Failed to create texture for '%s'", textureFile );
    stbi_image_free( textureData );
    return nullptr;
}
TextureID texture = std::move( textureOpt.value() );
VkImage textureImage = renderDevice.textureManager->fetchImage( texture ).value();

// Staging Buffer Create
VkBuffer stagingBuffer;
VmaAllocation stagingAllocation;
{
    VkBufferCreateInfo const stagingBufferCreateInfo = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .size = static_cast<VkDeviceSize>( width ) * height * numChannels * sizeof( textureData[0] ),
        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .queueFamilyIndexCount = 0,
        .pQueueFamilyIndices = nullptr,
    };
    VmaAllocationCreateInfo constexpr stagingAllocationCreateInfo = {
        .flags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT,
        .usage = VMA_MEMORY_USAGE_AUTO,
        .requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
        .preferredFlags = 0,
        .memoryTypeBits = 0,
        .pool = nullptr,
        .pUserData = nullptr,
        .priority = 1.0f,
    };
    VmaAllocationInfo allocationInfo;
    VK_CHECK( vmaCreateBuffer(
        renderDevice.gpuAllocator,
        &stagingBufferCreateInfo,
        &stagingAllocationCreateInfo,
        &stagingBuffer,
        &stagingAllocation,
        &allocationInfo ) );
    if ( allocationInfo.pMappedData )
    {
        memcpy( allocationInfo.pMappedData, textureData, stagingBufferCreateInfo.size );
    }
}
// All data is copied to stagingBuffer, don't need this.
stbi_image_free( textureData );

// Staging -> Texture transfer (records, submits, and waits synchronously).
{
    Frame& frameInUse = renderDevice.frames[renderDevice.frameIndex];
    // This should just pass.
    // Fixed: INT64_MAX -> UINT64_MAX (the timeout parameter is unsigned).
    VK_CHECK( vkWaitForFences( renderDevice.device, 1, &frameInUse.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
    // Reset Frame
    VK_CHECK( vkResetFences( renderDevice.device, 1, &frameInUse.frameReadyToReuse ) );
    VK_CHECK( vkResetCommandPool( renderDevice.device, frameInUse.commandPool, 0 ) );
    VkCommandBufferBeginInfo constexpr beginInfo = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .pNext = nullptr,
        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
        .pInheritanceInfo = nullptr,
    };
    uint32_t const mipLevels = TextureManager::calculateRequiredMipLevels( width, height, 1 );
    VkImageSubresourceRange const subresourceRange = {
        .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
        .baseMipLevel = 0,
        .levelCount = mipLevels,
        .baseArrayLayer = 0,
        .layerCount = 1,
    };
    // Whole image: UNDEFINED -> TRANSFER_DST before the buffer copy.
    VkImageMemoryBarrier2 const creationToTransferImageBarrier = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
        .pNext = nullptr,
        .srcStageMask = VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT,
        .srcAccessMask = VK_ACCESS_2_NONE,
        .dstStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
        .dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
        .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
        .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = textureImage,
        .subresourceRange = subresourceRange,
    };
    VkDependencyInfo const creationToTransferDependency = {
        .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
        .pNext = nullptr,
        .dependencyFlags = 0,
        .memoryBarrierCount = 0,
        .pMemoryBarriers = nullptr,
        .bufferMemoryBarrierCount = 0,
        .pBufferMemoryBarriers = nullptr,
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &creationToTransferImageBarrier,
    };
    // After mip generation: levels [0, mipLevels-1) are in TRANSFER_SRC
    // (each was flipped DST -> SRC before serving as a blit source), the
    // last level is still in TRANSFER_DST. Both go to SHADER_READ_ONLY.
    std::array transferToReadyImageBarriers{
        // transferToReadyImageBarrier
        VkImageMemoryBarrier2{
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
            .pNext = nullptr,
            .srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
            .srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
            .dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
            .dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
            .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = textureImage,
            .subresourceRange = {
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .baseMipLevel = 0,
                .levelCount = mipLevels - 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        },
        VkImageMemoryBarrier2{
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
            .pNext = nullptr,
            .srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
            .srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
            .dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT,
            .dstAccessMask = VK_ACCESS_2_SHADER_SAMPLED_READ_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = textureImage,
            .subresourceRange = {
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .baseMipLevel = mipLevels - 1,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        }
    };
    // Fixed: with mipLevels == 1 the first barrier would carry
    // levelCount == 0 (invalid); only the last-level barrier applies then.
    uint32_t const readyBarrierCount = mipLevels > 1 ? 2u : 1u;
    VkImageMemoryBarrier2 const* const readyBarriers =
        mipLevels > 1 ? transferToReadyImageBarriers.data() : &transferToReadyImageBarriers[1];
    VkDependencyInfo const transferToReadyDependency = {
        .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
        .pNext = nullptr,
        .dependencyFlags = 0,
        .memoryBarrierCount = 0,
        .pMemoryBarriers = nullptr,
        .bufferMemoryBarrierCount = 0,
        .pBufferMemoryBarriers = nullptr,
        .imageMemoryBarrierCount = readyBarrierCount,
        .pImageMemoryBarriers = readyBarriers,
    };
    VkImageSubresourceRange const mipLevelSubresource = {
        .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
        .baseMipLevel = 0,
        .levelCount = 1,
        .baseArrayLayer = 0,
        .layerCount = 1,
    };
    // Per-iteration pair: flip blit-source level DST -> SRC, bring the
    // blit-destination level into TRANSFER_DST. baseMipLevel fields are
    // patched each loop iteration below.
    std::array prepareNextMipLevelBarriers{
        // prepareNextMipLevelSrcImageBarrier
        VkImageMemoryBarrier2{
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
            .pNext = nullptr,
            .srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
            .srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
            .dstStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT,
            .dstAccessMask = VK_ACCESS_2_TRANSFER_READ_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = textureImage,
            .subresourceRange = mipLevelSubresource,
        },
        // prepareNextMipLevelDstImageBarrier
        VkImageMemoryBarrier2{
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2,
            .pNext = nullptr,
            .srcStageMask = VK_PIPELINE_STAGE_2_COPY_BIT,
            .srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
            .dstStageMask = VK_PIPELINE_STAGE_2_BLIT_BIT,
            .dstAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT,
            .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            // Fixed: newLayout was VK_IMAGE_LAYOUT_UNDEFINED, which is
            // invalid as a barrier target; the level about to be blitted
            // into must be in TRANSFER_DST_OPTIMAL.
            .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
            .image = textureImage,
            .subresourceRange = mipLevelSubresource,
        }
    };
    VkDependencyInfo const prepareNextMipLevelDependency = {
        .sType = VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
        .pNext = nullptr,
        .dependencyFlags = 0,
        .memoryBarrierCount = 0,
        .pMemoryBarriers = nullptr,
        .bufferMemoryBarrierCount = 0,
        .pBufferMemoryBarriers = nullptr,
        .imageMemoryBarrierCount = static_cast<uint32_t>( prepareNextMipLevelBarriers.size() ),
        .pImageMemoryBarriers = prepareNextMipLevelBarriers.data(),
    };
    VK_CHECK( vkBeginCommandBuffer( frameInUse.commandBuffer, &beginInfo ) );
    {
        VkImageSubresourceLayers const imageSubresourceLayers = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .mipLevel = 0,
            .baseArrayLayer = 0,
            .layerCount = 1,
        };
        // TODO: Ensure `bufferRowLength` and `bufferImageHeight` are not required.
        VkBufferImageCopy const copyRegion = {
            .bufferOffset = 0,
            .bufferRowLength = 0,
            .bufferImageHeight = 0,
            .imageSubresource = imageSubresourceLayers,
            .imageOffset = { 0, 0, 0 },
            .imageExtent = { width, height, 1 }
        };
        // Start
        vkCmdPipelineBarrier2( frameInUse.commandBuffer, &creationToTransferDependency );
        // Staging -> Image L0
        vkCmdCopyBufferToImage(
            frameInUse.commandBuffer,
            stagingBuffer,
            textureImage,
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            1,
            &copyRegion );
        // Prime the barrier/blit state for the first iteration (level 0 -> 1).
        prepareNextMipLevelBarriers[0].subresourceRange.baseMipLevel = 0;
        prepareNextMipLevelBarriers[1].subresourceRange.baseMipLevel = 1;
        int32_t mipSrcWidth = static_cast<int32_t>( width );
        int32_t mipSrcHeight = static_cast<int32_t>( height );
        int32_t mipDstWidth = std::max( mipSrcWidth / 2, 1 );
        int32_t mipDstHeight = std::max( mipSrcHeight / 2, 1 );
        VkImageSubresourceLayers constexpr mipSubresourceLayers = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .mipLevel = 0,
            .baseArrayLayer = 0,
            .layerCount = 1,
        };
        VkImageBlit2 imageBlit = {
            .sType = VK_STRUCTURE_TYPE_IMAGE_BLIT_2,
            .pNext = nullptr,
            .srcSubresource = mipSubresourceLayers,
            .srcOffsets = { { 0, 0, 0 }, { mipSrcWidth, mipSrcHeight, 1 } },
            .dstSubresource = mipSubresourceLayers,
            .dstOffsets = { { 0, 0, 0 }, { mipDstWidth, mipDstHeight, 1 } },
        };
        imageBlit.srcSubresource.mipLevel = 0;
        imageBlit.dstSubresource.mipLevel = 1;
        VkBlitImageInfo2 blitInfo = {
            .sType = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2,
            .pNext = nullptr,
            .srcImage = textureImage,
            .srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
            .dstImage = textureImage,
            .dstImageLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            .regionCount = 1,
            .pRegions = &imageBlit,
            .filter = VK_FILTER_LINEAR,
        };
        // MipMapping: each iteration blits level (dstMipLevel-1) into
        // dstMipLevel, then re-targets the shared barrier/blit structs for
        // the NEXT iteration (the state prepped after the final iteration
        // is intentionally never consumed).
        for ( uint32_t dstMipLevel = 1; dstMipLevel < mipLevels; ++dstMipLevel )
        {
            vkCmdPipelineBarrier2( frameInUse.commandBuffer, &prepareNextMipLevelDependency );
            vkCmdBlitImage2( frameInUse.commandBuffer, &blitInfo );
            // Prep for NEXT iteration
            mipSrcWidth = mipDstWidth;
            mipSrcHeight = mipDstHeight;
            mipDstWidth = std::max( mipSrcWidth / 2, 1 );
            mipDstHeight = std::max( mipSrcHeight / 2, 1 );
            imageBlit.srcSubresource.mipLevel = dstMipLevel;
            imageBlit.dstSubresource.mipLevel = dstMipLevel + 1;
            imageBlit.srcOffsets[1].x = mipSrcWidth;
            imageBlit.srcOffsets[1].y = mipSrcHeight;
            imageBlit.dstOffsets[1].x = mipDstWidth;
            imageBlit.dstOffsets[1].y = mipDstHeight;
            // Prep current mip level as source
            prepareNextMipLevelBarriers[0].subresourceRange.baseMipLevel = dstMipLevel;
            prepareNextMipLevelBarriers[1].subresourceRange.baseMipLevel = dstMipLevel + 1;
        }
        // End: whole chain -> SHADER_READ_ONLY_OPTIMAL.
        vkCmdPipelineBarrier2( frameInUse.commandBuffer, &transferToReadyDependency );
    }
    VK_CHECK( vkEndCommandBuffer( frameInUse.commandBuffer ) );
    VkSubmitInfo const submitInfo = {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .pNext = nullptr,
        .waitSemaphoreCount = 0,
        .pWaitSemaphores = nullptr,
        .pWaitDstStageMask = nullptr,
        .commandBufferCount = 1,
        .pCommandBuffers = &frameInUse.commandBuffer,
        .signalSemaphoreCount = 0,
        .pSignalSemaphores = nullptr,
    };
    VK_CHECK( vkQueueSubmit( renderDevice.directQueue, 1, &submitInfo, frameInUse.frameReadyToReuse ) );
    // Do not reset this. Else, the frame will never be available to the main loop.
    VK_CHECK( vkWaitForFences( renderDevice.device, 1, &frameInUse.frameReadyToReuse, VK_TRUE, UINT64_MAX ) );
    renderDevice.frameIndex = ( renderDevice.frameIndex + 1 ) % renderDevice.getNumFrames();
}
vmaDestroyBuffer( renderDevice.gpuAllocator, stagingBuffer, stagingAllocation );
material = { std::move( texture ), sampler };
}
// Fixed: the function used to return `entities + count` AFTER the
// post-increment, i.e. one past the entity it just created.
Entity* const created = &entities[count++];
*created = Entity( transform, mesh, std::move( material ) );
return created;
}
// Releases every GPU resource owned by `entity`: its sampler, its texture
// (returned to the TextureManager), and its vertex buffer. Slots that were
// never initialized are skipped, and Take() nulls each handle as it is
// released so a repeated call is harmless.
void EntityManager::destroyEntity( Entity* entity )
{
ASSERT( entity );
if ( not entity->isInit() )
{
    return;
}
RenderDevice& renderDevice = *pRenderDevice;
vkDestroySampler( renderDevice.device, Take( entity->material().sampler ), nullptr );
renderDevice.textureManager->freeTexture( std::move( entity->material().texture ) );
vmaDestroyBuffer( renderDevice.gpuAllocator, Take( entity->mesh().vertexBuffer ), Take( entity->mesh().vertexBufferAllocation ) );
// TODO: Leaking descriptor set.
}
// Tears the manager down: destroys every slot in the backing array (all
// `capacity` slots are visited — destroyEntity itself skips uninitialized
// ones) and then detaches from the storage. Must be called before the
// destructor runs; ~EntityManager asserts that it was.
void EntityManager::destroy()
{
for ( uint32_t slot = 0; slot < capacity; ++slot )
{
    destroyEntity( entities + slot );
}
entities = nullptr;
capacity = 0;
count = 0;
}
// The destructor releases nothing itself — destroy() is the explicit
// teardown path. It only verifies that destroy() already detached the
// entity storage.
EntityManager::~EntityManager()
{
assert( entities == nullptr );
}
// Carves storage for `capacity` entity slots plus the manager itself out of
// `mem`, zero-fills the slots, and placement-constructs the manager.
// NOTE(review): the Entity slots are memset rather than constructed — this
// assumes an all-zero Entity reads as "not initialized" (see isInit());
// confirm against Entity's definition.
EntityManager* EntityManager_Create( GlobalMemory* mem, RenderDevice* renderDevice, uint32_t const capacity )
{
size_t const slotBytes = sizeof( Entity ) * capacity;
auto* const slots = reinterpret_cast<Entity*>( mem->allocate( slotBytes, alignof( Entity ) ) );
memset( slots, 0, slotBytes );
std::byte* const managerStorage = mem->allocate( sizeof( EntityManager ), alignof( EntityManager ) );
return new ( managerStorage ) EntityManager{ renderDevice, slots, capacity };
}