Vulkan validation error fixes
ksuprynowicz committed Jan 6, 2025
1 parent 352073e commit 984a506
Showing 6 changed files with 144 additions and 193 deletions.
127 changes: 104 additions & 23 deletions libraries/gpu-vk/src/gpu/vk/VKBackend.cpp
@@ -889,16 +889,21 @@ void VKBackend::updateVkDescriptorWriteSetsTexture(VkDescriptorSet target) {
// VKTODO: move vulkan texture creation to the transfer parts
VKTexture* texture = syncGPUObject(*_resource._textures[i].texture);
VkDescriptorImageInfo imageInfo{};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
if (texture) {
imageInfo = texture->getDescriptorImageInfo();
} else {
}
if (imageInfo.imageLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
qDebug() << "Writing descriptor " << i << " with texture: " << _resource._textures[i].texture->source();
if (_resource._textures[i].texture) {
if (texture) {
qDebug() << "Warning: Texture " + _resource._textures[i].texture->source() + " being bound at input slot "
<< i << " is in VK_IMAGE_LAYOUT_UNDEFINED layout.";
} else if (_resource._textures[i].texture) {
qDebug() << "Cannot sync texture during descriptor " << i
<< " write: " << _resource._textures[i].texture->source();
} else {
qDebug() << "Texture is null during descriptor " << i
<< " write: " << _resource._textures[i].texture->source();
<< " write.";
}
imageInfo = _defaultTextureImageInfo;
}
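
Note on the hunk above: the fix avoids a validation error by never writing a descriptor that points at an image in VK_IMAGE_LAYOUT_UNDEFINED; if the texture cannot be synced, or its layout is still undefined, the write falls back to _defaultTextureImageInfo. Below is a minimal sketch of that fallback pattern for a combined image sampler binding; defaultSampler, defaultImageView, and device are hypothetical handles standing in for the backend's real default texture and VkDevice, not names from this commit.

    // Sketch only: substitute a known-good default image when the real texture
    // is not ready, so the descriptor never references an undefined layout.
    VkDescriptorImageInfo imageInfo{};
    imageInfo.sampler     = defaultSampler;      // hypothetical handle
    imageInfo.imageView   = defaultImageView;    // hypothetical handle
    imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

    VkWriteDescriptorSet write{};
    write.sType           = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.dstSet          = target;              // the VkDescriptorSet being filled
    write.dstBinding      = i;                   // the texture slot index
    write.descriptorCount = 1;
    write.descriptorType  = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    write.pImageInfo      = &imageInfo;
    vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
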
@@ -1088,7 +1093,7 @@ void VKBackend::updateRenderPass() {
auto framebuffer = syncGPUObject(*_cache.pipelineState.framebuffer);

// Current render pass is already up to date.
// VKTODO: check if framebuffer has changed and if so update render pass too
// VKTODO: check if framebuffer has changed and if so update render pass too.
if (_currentVkRenderPass == renderPass && _currentVkFramebuffer == framebuffer->vkFramebuffer) {
return;
}
@@ -2667,14 +2672,19 @@ void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
Q_ASSERT(TextureUsageType::RENDERBUFFER == surface->getUsageType());
vkTexture = backend->syncGPUObject(*surface.get());*/
auto texture = renderBuffers[i]._texture;
VKAttachmentTexture *attachmentTexture = nullptr;
if (texture) {
auto gpuTexture = syncGPUObject(*texture);
if (gpuTexture) {
attachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuTexture);
Q_ASSERT(attachmentTexture);
}
if (!texture) {
// Last texture of the key can be depth stencil attachment
Q_ASSERT(i + 1 == key.size());
texture = framebuffer->getDepthStencilBuffer();
}
Q_ASSERT(texture);
VKAttachmentTexture *attachmentTexture = nullptr;
//if (texture) {
auto gpuTexture = syncGPUObject(*texture);
Q_ASSERT(gpuTexture);
attachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuTexture);
Q_ASSERT(attachmentTexture);
//}
VkAttachmentDescription attachment{};
attachment.format = formatAndLayout.first;
attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
@@ -2685,35 +2695,33 @@ void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
}
//attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attachment.samples = VK_SAMPLE_COUNT_1_BIT;
if (isDepthStencilFormat(formatAndLayout.first)) {
if (texture->isDepthStencilRenderTarget()) {
clearValues.push_back(VkClearValue{.depthStencil = VkClearDepthStencilValue{ depth, (uint32_t)stencil }});
if (masks & Framebuffer::BUFFER_DEPTH) {
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
if (!formatHasStencil(attachment.format)) {
attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
}
} else {
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
}
// Texture state needs to be updated
auto depthStencil = framebuffer->getDepthStencilBuffer();
Q_ASSERT(depthStencil);
auto gpuDepthStencil = syncGPUObject(*depthStencil);
Q_ASSERT(gpuDepthStencil);
auto depthStencilAttachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuDepthStencil);
Q_ASSERT(depthStencilAttachmentTexture);

if ((masks & Framebuffer::BUFFER_DEPTH) && (masks & Framebuffer::BUFFER_STENCIL)) {

if (((masks & Framebuffer::BUFFER_DEPTH) && (masks & Framebuffer::BUFFER_STENCIL))
|| ((masks & Framebuffer::BUFFER_DEPTH) && !formatHasStencil(attachment.format))) {
attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
depthStencilAttachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
} else {
if (depthStencilAttachmentTexture->_vkImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
if (attachmentTexture->_vkImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
attachment.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
attachment.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
depthReference.layout = VK_IMAGE_LAYOUT_GENERAL;
} else {
attachment.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
depthStencilAttachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
}
}
@@ -2782,7 +2790,80 @@ void VKBackend::do_clearFramebuffer(const Batch& batch, size_t paramOffset) {
vkCmdBeginRenderPass(_currentCommandBuffer, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
vkCmdEndRenderPass(_currentCommandBuffer);
_currentFrame->_renderPasses.push_back(renderPass); // To be deleted after frame rendering is complete

// VKTODO: some sort of barrier is needed?
for (auto &renderBuffer : renderBuffers) {
auto texture = renderBuffer._texture;
if (!texture) {
continue;
}
auto gpuTexture = syncGPUObject(*texture);
Q_ASSERT(gpuTexture);
auto *attachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuTexture);
Q_ASSERT(attachmentTexture);
if (masks & Framebuffer::BUFFER_COLORS) {
if (attachmentTexture->_gpuObject.isDepthStencilRenderTarget()) {
// VKTODO: Check if the same depth render target is used as one of the inputs, if so then don't update it here
VkImageSubresourceRange mipSubRange = {};
mipSubRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
mipSubRange.baseMipLevel = 0;
mipSubRange.levelCount = 1;
mipSubRange.layerCount = 1;
vks::tools::insertImageMemoryBarrier(_currentCommandBuffer, attachmentTexture->_vkImage,
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VKTODO: should be
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, mipSubRange); // VKTODO: what stage mask for depth stencil?
attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
} else {
VkImageSubresourceRange mipSubRange = {};
mipSubRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
mipSubRange.baseMipLevel = 0;
mipSubRange.levelCount = 1;
mipSubRange.layerCount = 1;
vks::tools::insertImageMemoryBarrier(_currentCommandBuffer, attachmentTexture->_vkImage,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VKTODO: should be
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, mipSubRange);
attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
}
}

if (masks & Framebuffer::BUFFER_STENCIL || masks & Framebuffer::BUFFER_DEPTH) {
auto texture = framebuffer->getDepthStencilBuffer();
Q_ASSERT(texture);
auto gpuTexture = syncGPUObject(*texture);
Q_ASSERT(gpuTexture);
auto *attachmentTexture = dynamic_cast<VKAttachmentTexture*>(gpuTexture);
Q_ASSERT(attachmentTexture);

if ( attachmentTexture->_vkImage == (VkImage)(0x260000000026UL)) {
printf("0x260000000026UL");
}

VkImageSubresourceRange mipSubRange = {};
mipSubRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
if (formatHasStencil(evalTexelFormatInternal(texture->getTexelFormat()))) {
mipSubRange.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
}
mipSubRange.baseMipLevel = 0;
mipSubRange.levelCount = 1;
mipSubRange.layerCount = 1;
vks::tools::insertImageMemoryBarrier(_currentCommandBuffer, attachmentTexture->_vkImage,
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VKTODO: should be
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, // VKTODO
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, mipSubRange); // VKTODO: what stage mask for depth stencil?
attachmentTexture->_vkImageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
}
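
The layout transitions above go through vks::tools::insertImageMemoryBarrier. Assuming the vendored helper matches the usual Vulkan-samples version (a thin wrapper that fills a VkImageMemoryBarrier and calls vkCmdPipelineBarrier), the depth/stencil transition above roughly expands to the sketch below; the engine's copy may differ.

    // Sketch of the equivalent raw barrier; not the helper's actual source.
    VkImageMemoryBarrier barrier{};
    barrier.sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask       = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask       = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    barrier.oldLayout           = VK_IMAGE_LAYOUT_UNDEFINED;
    barrier.newLayout           = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image               = attachmentTexture->_vkImage;
    barrier.subresourceRange    = mipSubRange;
    vkCmdPipelineBarrier(_currentCommandBuffer,
                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,  // srcStageMask (still a VKTODO above)
                         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,  // dstStageMask
                         0, 0, nullptr, 0, nullptr, 1, &barrier);

Note that passing VK_IMAGE_LAYOUT_UNDEFINED as oldLayout tells the driver the previous contents of the image may be discarded.
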

/*VKuint glmask = 0;
if (masks & Framebuffer::BUFFER_STENCIL) {
30 changes: 17 additions & 13 deletions libraries/gpu-vk/src/gpu/vk/VKPipelineCache.cpp
@@ -150,7 +150,8 @@ Cache::Pipeline::PipelineLayout Cache::Pipeline::getPipelineAndDescriptorLayout(
Cache::Pipeline::RenderpassKey Cache::Pipeline::getRenderPassKey(gpu::Framebuffer* framebuffer) const {
RenderpassKey result;
if (!framebuffer) {
result.emplace_back(VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_LAYOUT_UNDEFINED);
result.emplace_back(VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_LAYOUT_UNDEFINED); // VKTODO: this is definitely wrong, why is it that way?
Q_ASSERT(false);
} else {
for (const auto& attachment : framebuffer->getRenderBuffers()) {
if (attachment.isValid()) {
@@ -191,19 +192,19 @@ VkRenderPass Cache::Pipeline::getRenderPass(const vks::Context& context) {
auto itr = _renderPassMap.find(key);
if (itr == _renderPassMap.end()) {
auto &renderBuffers = framebuffer->getRenderBuffers();
size_t renderBufferIndex = 0;
std::vector<VkAttachmentDescription> attachments;
attachments.reserve(key.size());
std::vector<VkAttachmentReference> colorAttachmentReferences;
VkAttachmentReference depthReference{};
for (const auto& formatAndLayout : key) {
Q_ASSERT(renderBufferIndex < renderBuffers.size());
std::shared_ptr<gpu::Texture> texture = renderBuffers[renderBufferIndex]._texture;
if (isDepthStencilFormat(formatAndLayout.first)) {
for (size_t i = 0; i < key.size(); i++) {
Q_ASSERT(i < renderBuffers.size());
std::shared_ptr<gpu::Texture> texture = renderBuffers[i]._texture;
if (!texture) {
// Last texture of the key can be depth stencil attachment
Q_ASSERT(i + 1 == key.size());
texture = framebuffer->getDepthStencilBuffer();
} else {
texture = renderBuffers[renderBufferIndex]._texture;
renderBufferIndex++;
texture = renderBuffers[i]._texture;
}
VKAttachmentTexture *attachmentTexture = nullptr;
if (texture) {
@@ -213,11 +214,11 @@ VkRenderPass Cache::Pipeline::getRenderPass(const vks::Context& context) {
}
}
VkAttachmentDescription attachment{};
attachment.format = formatAndLayout.first;
attachment.format = key[i].first;
// Framebuffers are always cleared with a separate command in the renderer.
if (!attachmentTexture || attachmentTexture->getVkImageLayout() == VK_IMAGE_LAYOUT_UNDEFINED) {
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
} else {
attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
@@ -226,10 +227,11 @@ VkRenderPass Cache::Pipeline::getRenderPass(const vks::Context& context) {
attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
//attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attachment.samples = VK_SAMPLE_COUNT_1_BIT;
if (isDepthStencilFormat(formatAndLayout.first)) {
if (texture->isDepthStencilRenderTarget()) {
if (!attachmentTexture || attachmentTexture->getVkImageLayout() == VK_IMAGE_LAYOUT_UNDEFINED) {
attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
depthReference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
} else {
if (attachmentTexture->getVkImageLayout() == VK_IMAGE_LAYOUT_GENERAL) {
attachment.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
Expand Down Expand Up @@ -263,7 +265,9 @@ VkRenderPass Cache::Pipeline::getRenderPass(const vks::Context& context) {
{
VkSubpassDescription subpass{};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
if (depthReference.layout != VK_IMAGE_LAYOUT_UNDEFINED) {
//if (depthReference.layout != VK_IMAGE_LAYOUT_UNDEFINED) {
if (framebuffer->getDepthStencilBuffer()) {
Q_ASSERT(depthReference.layout != VK_IMAGE_LAYOUT_UNDEFINED);
subpass.pDepthStencilAttachment = &depthReference;
}
subpass.colorAttachmentCount = (uint32_t)colorAttachmentReferences.size();
14 changes: 13 additions & 1 deletion libraries/gpu-vk/src/gpu/vk/VKShared.cpp
@@ -428,7 +428,7 @@ VkFormat gpu::vk::evalTexelFormatInternal(const gpu::Element& dstFormat) {
return result;
}

bool gpu::vk::isDepthStencilFormat(VkFormat format) {
/*bool gpu::vk::isDepthStencilFormat(VkFormat format) {
switch (format) {
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_X8_D24_UNORM_PACK32:
@@ -442,6 +442,18 @@ bool gpu::vk::isDepthStencilFormat(VkFormat format) {
break;
}
return false;
}*/
bool gpu::vk::formatHasStencil(VkFormat format) {
switch (format) {
case VK_FORMAT_S8_UINT:
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return true;
default:
break;
}
return false;
}
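
formatHasStencil takes over from the commented-out isDepthStencilFormat helper and is used by the backend when deciding whether the stencil aspect must be included for a depth/stencil attachment. An illustrative use, mirroring the aspect-mask selection in the do_clearFramebuffer hunk above (the `format` variable here is just a placeholder for the attachment's VkFormat):

    // Illustrative sketch: pick the subresource aspect mask for a depth/stencil
    // attachment based on whether its format carries a stencil component.
    VkImageSubresourceRange range{};
    range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    if (gpu::vk::formatHasStencil(format)) {
        range.aspectMask |= VK_IMAGE_ASPECT_STENCIL_BIT;
    }
    range.baseMipLevel   = 0;
    range.levelCount     = 1;
    range.baseArrayLayer = 0;
    range.layerCount     = 1;
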

VkColorComponentFlags gpu::vk::colorMaskToVk(const gpu::State::ColorMask &mask) {
3 changes: 2 additions & 1 deletion libraries/gpu-vk/src/gpu/vk/VKShared.h
@@ -34,7 +34,8 @@ struct ShaderObject {

VkFormat evalTexelFormatInternal(const Element& dstFormat);

bool isDepthStencilFormat(VkFormat format);
//bool isDepthStencilFormat(VkFormat format);
bool formatHasStencil(VkFormat format);

VkColorComponentFlags colorMaskToVk(const gpu::State::ColorMask &mask);

13 changes: 8 additions & 5 deletions libraries/gpu-vk/src/gpu/vk/VKTexture.cpp
@@ -346,7 +346,7 @@ void VKStrictResourceTexture::createTexture(VKBackend &backend) {
if (_gpuObject.getType() == Texture::TEX_CUBE) {
Q_ASSERT(_gpuObject.getNumFaces() == 6);
face_count = 6;
}else{
} else {
Q_ASSERT(_gpuObject.getNumFaces() == 1);
}

Expand All @@ -355,17 +355,17 @@ void VKStrictResourceTexture::createTexture(VKBackend &backend) {
bool needsBGRToRGB = false;
auto storedFormat = _gpuObject.getStoredMipFormat();
auto texelFormat = _gpuObject.getTexelFormat();
if ((storedFormat.getSemantic() == gpu::BGRA || storedFormat.getSemantic() == gpu::SBGRA)
&& !(texelFormat.getSemantic() == gpu::BGRA || texelFormat.getSemantic() == gpu::SBGRA)) {
if ((storedFormat.getSemantic() == gpu::BGRA || storedFormat.getSemantic() == gpu::SBGRA) &&
!(texelFormat.getSemantic() == gpu::BGRA || texelFormat.getSemantic() == gpu::SBGRA)) {
needsBGRToRGB = true;
}
auto storedVkFormat = evalTexelFormatInternal(_gpuObject.getStoredMipFormat());
auto texelVkFormat = evalTexelFormatInternal(_gpuObject.getTexelFormat());
if (storedFormat.getDimension() != texelFormat.getDimension()) {
if (storedFormat.getDimension() == gpu::VEC3 && texelFormat.getDimension() == gpu::VEC4) {
// It's best to make sure that this is not happening in unexpected cases and causing bugs
Q_ASSERT((storedVkFormat == VK_FORMAT_R8G8B8_UNORM && texelVkFormat == VK_FORMAT_R8G8B8A8_UNORM)
|| (storedVkFormat == VK_FORMAT_R8G8B8_UNORM && texelVkFormat == VK_FORMAT_R8G8B8A8_SRGB));
Q_ASSERT((storedVkFormat == VK_FORMAT_R8G8B8_UNORM && texelVkFormat == VK_FORMAT_R8G8B8A8_UNORM) ||
(storedVkFormat == VK_FORMAT_R8G8B8_UNORM && texelVkFormat == VK_FORMAT_R8G8B8A8_SRGB));
needsAddingAlpha = true;
} else {
qDebug() << "Format mismatch, stored: " << storedVkFormat << " texel: " << texelVkFormat;
@@ -408,6 +408,9 @@ void VKStrictResourceTexture::createTexture(VKBackend &backend) {
allocationCI.usage = VMA_MEMORY_USAGE_GPU_ONLY;
qDebug() << "storedSize: " << _gpuObject.getStoredSize();
VK_CHECK_RESULT(vmaCreateImage(vks::Allocation::getAllocator(), &imageCI, &allocationCI, &_vkImage, &_vmaAllocation, nullptr));
/*if ( _vkImage != (VkImage)(0x260000000026UL)) {
printf("0x260000000026UL");
}*/
}

void VKStrictResourceTexture::transfer(VKBackend &backend) {
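
The createTexture path above allocates the image through VulkanMemoryAllocator via vmaCreateImage. For reference, a minimal, self-contained sketch of the same call pattern, using GPU-only memory as in the hunk; the extent, format, and usage values here are placeholders, not the engine's actual texture parameters.

    // Minimal sketch of image creation through VMA, mirroring the call above.
    VkImageCreateInfo imageCI{};
    imageCI.sType         = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageCI.imageType     = VK_IMAGE_TYPE_2D;
    imageCI.format        = VK_FORMAT_R8G8B8A8_UNORM;
    imageCI.extent        = { 1024, 1024, 1 };
    imageCI.mipLevels     = 1;
    imageCI.arrayLayers   = 1;
    imageCI.samples       = VK_SAMPLE_COUNT_1_BIT;
    imageCI.tiling        = VK_IMAGE_TILING_OPTIMAL;
    imageCI.usage         = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    imageCI.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VmaAllocationCreateInfo allocationCI{};
    allocationCI.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation allocation = nullptr;
    VK_CHECK_RESULT(vmaCreateImage(vks::Allocation::getAllocator(), &imageCI, &allocationCI,
                                   &image, &allocation, nullptr));
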
