Refactored to write directly to the cubemap.
parent 6b5442527f
commit 6d39576b5c
@@ -11,7 +11,10 @@ void
Image::Destroy(const Device *device)
{
    if (!IsValid() || !IsOwned())
    {
        m_MipLevels_ = 0;
        return;
    }

    device->m_Device.destroy(m_View, nullptr);
    vmaDestroyImage(device->m_Allocator, m_Image, m_Allocation);

@@ -280,7 +283,6 @@ void
StorageTexture::Init(const Device *device, vk::Extent2D extent, const vk::Format imageFormat, const bool isSampled,
                     cstr name)
{

    // Reasoning:
    // Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
    // results.

@@ -342,3 +344,72 @@ StorageTexture::Init(const Device *device, vk::Extent2D extent, const vk::Format

    device->SetName(m_Image, name);
}

void
StorageTextureCube::Init(const Device* device, u32 cubeSide, vk::Format imageFormat, bool isSampled, bool isMipMapped, cstr name)
{
    // Reasoning:
    // Transfer Src and Dst to copy to and from the buffer since Storage will often be loaded with info, and read for
    // results.
    auto usage =
        vk::ImageUsageFlagBits::eStorage | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst;
    if (isSampled)
    {
        WARN_IF(!IsPowerOfTwo(cubeSide), "Image {1} is {0}x{0} (Non Power of Two)",
                cubeSide, name ? name : "<unnamed>");
        usage |= vk::ImageUsageFlagBits::eSampled;
    }

    const u32 mipLevels = isMipMapped ? 1 + Cast<u32>(floor(log2(cubeSide))) : 1;
    assert(mipLevels <= MIP_MASK);

    vk::ImageCreateInfo imageCreateInfo = {
        .flags = vk::ImageCreateFlagBits::eCubeCompatible,
        .imageType = vk::ImageType::e2D,
        .format = imageFormat,
        .extent = {cubeSide, cubeSide, 1},
        .mipLevels = mipLevels,
        .arrayLayers = 6,
        .samples = vk::SampleCountFlagBits::e1,
        .tiling = vk::ImageTiling::eOptimal,
        .usage = usage,
        .sharingMode = vk::SharingMode::eExclusive,
        .initialLayout = vk::ImageLayout::eUndefined,
    };
    constexpr VmaAllocationCreateInfo allocationCreateInfo = {
        .flags = {},
        .usage = VMA_MEMORY_USAGE_AUTO,
    };

    VkImage image;
    VmaAllocation allocation;
    auto result = Cast<vk::Result>(vmaCreateImage(device->m_Allocator, Recast<VkImageCreateInfo *>(&imageCreateInfo),
                                                  &allocationCreateInfo, &image, &allocation, nullptr));
    ERROR_IF(Failed(result), "Could not allocate image {}. Cause: {}", name, result) THEN_ABORT(result);

    vk::ImageView view;
    const vk::ImageViewCreateInfo imageViewCreateInfo = {
        .image = image,
        .viewType = vk::ImageViewType::eCube,
        .format = imageFormat,
        .components = {},
        .subresourceRange =
            {
                .aspectMask = vk::ImageAspectFlagBits::eColor,
                .baseMipLevel = 0,
                .levelCount = mipLevels,
                .baseArrayLayer = 0,
                .layerCount = 6,
            },
    };
    result = device->m_Device.createImageView(&imageViewCreateInfo, nullptr, &view);
    ERROR_IF(Failed(result), "Could not create image view {}. Cause: {}", name, result) THEN_ABORT(result);

    m_Image = image;
    m_View = view;
    m_Allocation = allocation;
    m_Extent = imageCreateInfo.extent;
    m_MipLevels_ = mipLevels | OWNED_BIT | VALID_BIT;

    device->SetName(m_Image, name);
}
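
The mip count above is the usual full-chain rule: a side of s texels supports 1 + floor(log2(s)) levels before the chain bottoms out at 1x1, so a 1024-texel cube side yields 11 levels. A standalone sketch of that arithmetic (hypothetical helper for illustration, not code from this commit):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Mirrors `1 + Cast<u32>(floor(log2(cubeSide)))` from StorageTextureCube::Init above.
static uint32_t FullMipChainLevels(uint32_t side)
{
    return 1u + static_cast<uint32_t>(std::floor(std::log2(static_cast<double>(side))));
}

int main()
{
    std::printf("%u\n", FullMipChainLevels(1024)); // 11 levels: 1024, 512, ..., 1
    std::printf("%u\n", FullMipChainLevels(512));  // 10 levels
    std::printf("%u\n", FullMipChainLevels(100));  // 7 levels (non power of two is floored)
}
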
@@ -59,27 +59,45 @@ struct Texture : Image
    void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, bool isMipMapped, cstr name = nullptr);
};

static_assert(sizeof(Texture) == sizeof(Image));

struct TextureCube : Texture
{
    void
    Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isMipMapped = false, cstr name = nullptr);
};

static_assert(sizeof(TextureCube) == sizeof(Image));

struct AttachmentImage : Image
{
    void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, cstr name = nullptr);
};

static_assert(sizeof(AttachmentImage) == sizeof(Image));

struct DepthImage : Image
{
    void Init(const Device *device, vk::Extent2D extent, cstr name = nullptr);
};

static_assert(sizeof(DepthImage) == sizeof(Image));

struct StorageTexture : Texture
{
    void Init(const Device *device, vk::Extent2D extent, vk::Format imageFormat, bool isSampled, cstr name = nullptr);
};

static_assert(sizeof(StorageTexture) == sizeof(Image));

struct StorageTextureCube : StorageTexture
{
    void Init(const Device *device, u32 cubeSide, vk::Format imageFormat, bool isSampled, bool isMipMapped = false,
              cstr name = nullptr);
};

static_assert(sizeof(StorageTextureCube) == sizeof(Image));

inline bool
Image::IsValid() const
{

@@ -323,7 +323,7 @@ GpuResourceManager::Release(Texture *texture, TextureHandle handle)
}

TextureHandle
GpuResourceManager::Commit(Texture *texture)
GpuResourceManager::CommitTexture(Texture *texture)
{
    TextureHandle handle = m_TextureManager.Commit(texture);

@@ -352,7 +352,7 @@ GpuResourceManager::Commit(Texture *texture)
}

StorageTextureHandle
GpuResourceManager::Commit(StorageTexture *storageTexture)
GpuResourceManager::CommitStorageTexture(StorageTexture *storageTexture)
{
    StorageTextureHandle handle = m_StorageTextureManager.Commit(storageTexture);

@@ -126,13 +126,13 @@ struct GpuResourceManager
    void Release(BufferHandle handle);                               // Release and Destroy
    void Release(StorageBuffer *storageBuffer, BufferHandle handle); // Release and Return

    TextureHandle Commit(Texture *texture);                // Commit to GPU and take Ownership
    TextureHandle CommitTexture(Texture *texture);         // Commit to GPU and take Ownership
    void Release(TextureHandle handle);                    // Release and Destroy
    void Release(Texture *texture, TextureHandle handle);  // Release and Return

    StorageTextureHandle Commit(StorageTexture *storageTexture);
    void Release(StorageTextureHandle handle);
    void Release(StorageTexture *texture, StorageTextureHandle handle);
    StorageTextureHandle CommitStorageTexture(StorageTexture *storageTexture); // Commit to GPU and take Ownership
    void Release(StorageTextureHandle handle);                                 // Release and Destroy
    void Release(StorageTexture *texture, StorageTextureHandle handle);        // Release and Return

    void Update(); // Update all the descriptors required.
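
The comments above spell out an ownership handshake: a Commit* call hands the CPU-side wrapper to the manager and returns a bindless handle, Release(handle) destroys the resource, and Release(ptr, handle) hands the wrapper back to the caller. A hedged usage sketch with illustrative names (not code from this commit):

Texture albedo;
albedo.Init(device, extent, vk::Format::eR8G8B8A8Unorm, /*isMipMapped*/ true, "Albedo");
TextureHandle handle = resourceManager.CommitTexture(&albedo); // manager now owns the image

// ... sample through `handle` in shaders ...

// Either take the texture back ...
resourceManager.Release(&albedo, handle);
// ... or let the manager destroy it:
// resourceManager.Release(handle);
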
@@ -388,7 +388,7 @@ AssetLoader::LoadImageToGpu(StagingBuffer *stagingBuffer, tinygltf::Image *image
    m_CommandBuffer.endDebugUtilsLabelEXT();
#endif

    return m_ResourceManager->Commit(&texture);
    return m_ResourceManager->CommitTexture(&texture);
}

Model

@@ -5,12 +5,13 @@

#include "ibl_helpers.h"

#include "asset_loader.h"
#include "device.h"
#include "gpu_resource_manager.h"
#include "helpers.h"
#include "image.h"
#include "asset_loader.h"
#include "pipeline_utils.h"
#include "EASTL/tuple.h"

constexpr cstr EQUIRECT_TO_CUBE_SHADER_FILE = "shader/eqrectToCube.cs.hlsl.spv";

@@ -21,20 +22,10 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32
    GpuResourceManager *resMan = assetLoader->m_ResourceManager;
    const Device *pDevice = resMan->m_Device;

    TextureCube cubeMap;
    cubeMap.Init(pDevice, cubeSide, vk::Format::eR16G16B16A16Sfloat, false, name ? name : "Env CubeMap");
    StorageTextureCube cubeMap;
    cubeMap.Init(pDevice, cubeSide, vk::Format::eR16G16B16A16Sfloat, true, false, name ? name : "Env CubeMap");
    StorageTextureHandle envStagingHandle = resMan->CommitStorageTexture(&cubeMap);

    StorageTexture stagingTexture;
    stagingTexture.Init(pDevice, {cubeSide * 3, cubeSide * 2}, vk::Format::eR16G16B16A16Sfloat, false, "EnvStaging");
    auto envStagingHandle = resMan->Commit(&stagingTexture);

    vk::ImageSubresourceRange stagingSubresRange = {
        .aspectMask = vk::ImageAspectFlagBits::eColor,
        .baseMipLevel = 0,
        .levelCount = 1,
        .baseArrayLayer = 0,
        .layerCount = 1,
    };
    vk::ImageSubresourceRange cubeSubresRange = {
        .aspectMask = vk::ImageAspectFlagBits::eColor,
        .baseMipLevel = 0,

@@ -52,48 +43,19 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32
        .newLayout = vk::ImageLayout::eGeneral,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = stagingTexture.m_Image,
        .subresourceRange = stagingSubresRange,
        .image = cubeMap.m_Image,
        .subresourceRange = cubeSubresRange,
    };
    vk::DependencyInfo readyToWriteDependency = {
        .imageMemoryBarrierCount = 1,
        .pImageMemoryBarriers = &readyToWriteBarrier,
    };
    vk::ImageMemoryBarrier2 stagingReadyForTransfer = {
    vk::ImageMemoryBarrier2 cubemapToRead = {
        .srcStageMask = vk::PipelineStageFlagBits2::eComputeShader,
        .srcAccessMask = vk::AccessFlagBits2::eShaderStorageWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferRead,
        .oldLayout = vk::ImageLayout::eGeneral,
        .newLayout = vk::ImageLayout::eTransferSrcOptimal,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = stagingTexture.m_Image,
        .subresourceRange = stagingSubresRange,
    };
    vk::ImageMemoryBarrier2 cubeReadyForTransfer = {
        .srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe,
        .srcAccessMask = vk::AccessFlagBits2::eNone,
        .dstStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
        .dstAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .oldLayout = vk::ImageLayout::eUndefined,
        .newLayout = vk::ImageLayout::eTransferDstOptimal,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .image = cubeMap.m_Image,
        .subresourceRange = cubeSubresRange,
    };
    eastl::array transferBarriers = {stagingReadyForTransfer, cubeReadyForTransfer};
    vk::DependencyInfo preTransferDependency = {
        .imageMemoryBarrierCount = transferBarriers.size(),
        .pImageMemoryBarriers = transferBarriers.data(),
    };
    vk::ImageMemoryBarrier2 cubemapToRead = {
        .srcStageMask = vk::PipelineStageFlagBits2::eAllTransfer,
        .srcAccessMask = vk::AccessFlagBits2::eTransferWrite,
        .dstStageMask = vk::PipelineStageFlagBits2::eFragmentShader,
        .dstAccessMask = vk::AccessFlagBits2::eShaderSampledRead,
        .oldLayout = vk::ImageLayout::eTransferDstOptimal,
        .oldLayout = vk::ImageLayout::eGeneral,
        .newLayout = vk::ImageLayout::eShaderReadOnlyOptimal,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
        .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,

@@ -105,44 +67,6 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32
        .pImageMemoryBarriers = &cubemapToRead,
    };

    eastl::array<vk::ImageCopy2, 6> imageCopies;
    for (i32 i = 0; i < 6; ++i)
    {
        imageCopies[i] = vk::ImageCopy2{
            .srcSubresource =
                {
                    .aspectMask = vk::ImageAspectFlagBits::eColor,
                    .mipLevel = 0,
                    .baseArrayLayer = 0,
                    .layerCount = 1,
                },
            .srcOffset =
                {
                    (i % 3) * Cast<i32>(cubeSide),
                    (i / 3) * Cast<i32>(cubeSide),
                    0,
                },
            .dstSubresource =
                {
                    .aspectMask = vk::ImageAspectFlagBits::eColor,
                    .mipLevel = 0,
                    .baseArrayLayer = Cast<u32>(i),
                    .layerCount = 1,
                },
            .dstOffset = {0, 0, 0},
            .extent = {cubeSide, cubeSide, 1},
        };
    }

    vk::CopyImageInfo2 copyFlatToCube = {
        .srcImage = stagingTexture.m_Image,
        .srcImageLayout = vk::ImageLayout::eTransferSrcOptimal,
        .dstImage = cubeMap.m_Image,
        .dstImageLayout = vk::ImageLayout::eTransferDstOptimal,
        .regionCount = imageCopies.size(),
        .pRegions = imageCopies.data(),
    };

    struct WorkloadPushConstants
    {
        TextureHandle m_HdrEnvHandle;

@@ -213,10 +137,6 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32
    cmd.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, sizeof pushConstants, &pushConstants);
    cmd.dispatch(cubeSide / 16, cubeSide / 16, 6);

    cmd.pipelineBarrier2(&preTransferDependency);

    cmd.copyImage2(&copyFlatToCube);

    cmd.pipelineBarrier2(&cubemapToReadDependency);

#if !defined(NDEBUG)

@@ -241,9 +161,10 @@ CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, const u32

    AbortIfFailed(pDevice->m_Device.resetCommandPool(assetLoader->m_CommandPool, {}));

    resMan->Release(envStagingHandle);
    cubeMap = {};
    resMan->Release(&cubeMap, envStagingHandle);
    pDevice->m_Device.destroy(pipeline, nullptr);
    pDevice->m_Device.destroy(pipelineLayout, nullptr);

    return resMan->Commit(&cubeMap);
    return resMan->CommitTexture(&cubeMap);
}
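
The dispatch recorded earlier in this function, cmd.dispatch(cubeSide / 16, cubeSide / 16, 6), launches one group slice per face and appears to assume a 16x16x1 thread-group size (the shader's numthreads attribute is outside this diff), i.e. one thread per cubemap texel and a cube side that is a multiple of 16. A standalone check of that arithmetic (hypothetical helper for illustration, not code from this commit):

#include <cassert>
#include <cstdint>
#include <cstdio>

struct GroupCounts { uint32_t x, y, z; };

// Group counts matching cmd.dispatch(cubeSide / 16, cubeSide / 16, 6), assuming 16x16x1 groups.
static GroupCounts CubeDispatch(uint32_t cubeSide)
{
    assert(cubeSide % 16u == 0u && "cube side is assumed to be a multiple of the group size");
    return {cubeSide / 16u, cubeSide / 16u, 6u};
}

int main()
{
    const GroupCounts g = CubeDispatch(1024); // the 1024 cube requested in main()
    const uint64_t threads = uint64_t(g.x) * g.y * g.z * 16u * 16u;
    std::printf("%ux%ux%u groups, %llu threads\n", g.x, g.y, g.z,
                static_cast<unsigned long long>(threads)); // 64x64x6 groups, 6291456 threads
}
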
@@ -8,8 +8,10 @@
#include "global.h"
#include "gpu_resource_manager.h"

struct Pipeline;
struct Texture;
struct TextureCube;
struct AssetLoader;

TextureHandle CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, u32 cubeSide, TextureHandle hdrEnv, cstr name = nullptr);
TextureHandle CreateCubeFromHdrEnv(AssetLoader *assetLoader, vk::Queue computeQueue, u32 cubeSide, TextureHandle hdrEnv,
                                   cstr name = nullptr);

@@ -178,7 +178,7 @@ main(int, char **)
    Model model = assetLoader.LoadModelToGpu(MODEL_FILE);
    Texture environment;
    assetLoader.LoadHdrImage(&environment, BACKDROP_FILE);
    auto envHandle = resourceManager.Commit(&environment);
    auto envHandle = resourceManager.CommitTexture(&environment);

    TextureHandle texCube = CreateCubeFromHdrEnv(&assetLoader, graphicsQueue, 1024, envHandle, "Cube Env");

@@ -62,3 +62,4 @@ static const float PI = 3.14159265f;
[[vk::binding(1, 0)]] SamplerState ImmutableSamplers[];

[[vk::binding(2, 0)]] RWTexture2D<float4> StorageTextures[];
[[vk::binding(2, 0)]] RWTexture2DArray<float4> StorageTextureArrays[];

@@ -50,9 +50,6 @@ void main(uint3 GlobalInvocationID : SV_DispatchThreadID)
        LocalDir = float3((GlobalInvocationID.x - HalfSide) * AxisSign, GlobalInvocationID.y - HalfSide, -AxisSign * HalfSide);
    }

    uint TileX = GlobalInvocationID.z % 3;
    uint TileY = GlobalInvocationID.z / 3;

    float2 UV = SampleSphericalMap(normalize(LocalDir));
    StorageTextures[pcb.OutputTextureHandle][GlobalInvocationID.xy + uint2(TileX, TileY) * pcb.CubeSize] = Textures[pcb.HdrEnvHandle].SampleLevel(ImmutableSamplers[pcb.HdrEnvHandle], UV, 0);
    StorageTextureArrays[pcb.OutputTextureHandle][GlobalInvocationID.xyz] = Textures[pcb.HdrEnvHandle].SampleLevel(ImmutableSamplers[pcb.HdrEnvHandle], UV, 0);
}
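
The write above is the heart of the refactor: previously face z of the cube was packed into tile (z % 3, z / 3) of the flat 3x2 staging texture and copied out with six vk::ImageCopy2 regions; now the same texel goes straight into array layer z of the cube map. A small standalone sketch of the two addressing schemes (illustrative C++, not shader code from this commit):

#include <cstdint>
#include <cstdio>

struct Texel2D      { uint32_t x, y; };
struct Texel2DArray { uint32_t x, y, layer; };

// Old path: face `face` landed in tile (face % 3, face / 3) of a (3 * side) x (2 * side) staging image.
static Texel2D OldAtlasAddress(uint32_t x, uint32_t y, uint32_t face, uint32_t side)
{
    return {x + (face % 3u) * side, y + (face / 3u) * side};
}

// New path: the compute shader writes the texel at (x, y) of array layer `face` directly.
static Texel2DArray NewLayerAddress(uint32_t x, uint32_t y, uint32_t face)
{
    return {x, y, face};
}

int main()
{
    const uint32_t side = 1024;
    const Texel2D oldAddr = OldAtlasAddress(7, 9, 4, side); // face 4 -> tile (1, 1) -> (1031, 1033)
    const Texel2DArray newAddr = NewLayerAddress(7, 9, 4);
    std::printf("old (%u, %u) vs new (%u, %u, layer %u)\n", oldAddr.x, oldAddr.y, newAddr.x, newAddr.y, newAddr.layer);
}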